1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
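/* As an illustration of the merging described above: two consecutive
   ".pad #8" directives simply accumulate in pending_offset, and a single
   16-byte stack-adjustment opcode is emitted when the pending value is
   eventually flushed.  */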
81
82 /* Bit N indicates that an R_ARM_NONE relocation has been output for
83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
91 typedef enum
92 {
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
 176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
224 static int mfloat_abi_opt = -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name[16];
229 #ifdef OBJ_ELF
230 # ifdef EABI_DEFAULT
231 static int meabi_flags = EABI_DEFAULT;
232 # else
233 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
234 # endif
235
236 bfd_boolean
237 arm_is_eabi(void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
276
277 static bfd_boolean unified_syntax = FALSE;
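/* Illustrative examples of the rules above (standard UAL spellings, not
   taken from this file):

     divided ARM syntax:   addeqs r0, r0, #1    @ condition before 's'
     unified syntax:       addseq r0, r0, 1     @ 's' first, '#' optional

   and in unified Thumb code a conditional instruction must be covered by a
   preceding IT instruction:

     it     eq
     addeq  r0, r0, r1  */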
278
279 enum neon_el_type
280 {
281 NT_invtype,
282 NT_untyped,
283 NT_integer,
284 NT_float,
285 NT_poly,
286 NT_signed,
287 NT_unsigned
288 };
289
290 struct neon_type_el
291 {
292 enum neon_el_type type;
293 unsigned size;
294 };
295
296 #define NEON_MAX_TYPE_ELS 4
297
298 struct neon_type
299 {
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
301 unsigned elems;
302 };
303
304 struct arm_it
305 {
306 const char * error;
307 unsigned long instruction;
308 int size;
309 int size_req;
310 int cond;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
313 appropriate. */
314 int uncond_value;
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
318 unsigned long relax;
319 struct
320 {
321 bfd_reloc_code_real_type type;
322 expressionS exp;
323 int pc_rel;
324 } reloc;
325
326 struct
327 {
328 unsigned reg;
329 signed int imm;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
337 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
338 instructions. This allows us to disambiguate ARM <-> vector insns. */
339 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
340 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
341 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
342 unsigned issingle : 1; /* Operand is VFP single-precision register. */
343 unsigned hasreloc : 1; /* Operand has relocation suffix. */
344 unsigned writeback : 1; /* Operand has trailing ! */
345 unsigned preind : 1; /* Preindexed address. */
346 unsigned postind : 1; /* Postindexed address. */
347 unsigned negative : 1; /* Index register was negated. */
348 unsigned shifted : 1; /* Shift applied to operation. */
349 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
350 } operands[6];
351 };
352
353 static struct arm_it inst;
354
355 #define NUM_FLOAT_VALS 8
356
357 const char * fp_const[] =
358 {
359 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
360 };
361
362 /* Number of littlenums required to hold an extended precision number. */
363 #define MAX_LITTLENUMS 6
364
365 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
366
367 #define FAIL (-1)
368 #define SUCCESS (0)
369
370 #define SUFF_S 1
371 #define SUFF_D 2
372 #define SUFF_E 3
373 #define SUFF_P 4
374
375 #define CP_T_X 0x00008000
376 #define CP_T_Y 0x00400000
377
378 #define CONDS_BIT 0x00100000
379 #define LOAD_BIT 0x00100000
380
381 #define DOUBLE_LOAD_FLAG 0x00000001
382
383 struct asm_cond
384 {
385 const char * template;
386 unsigned long value;
387 };
388
389 #define COND_ALWAYS 0xE
390
391 struct asm_psr
392 {
393 const char *template;
394 unsigned long field;
395 };
396
397 struct asm_barrier_opt
398 {
399 const char *template;
400 unsigned long value;
401 };
402
403 /* The bit that distinguishes CPSR and SPSR. */
404 #define SPSR_BIT (1 << 22)
405
406 /* The individual PSR flag bits. */
407 #define PSR_c (1 << 16)
408 #define PSR_x (1 << 17)
409 #define PSR_s (1 << 18)
410 #define PSR_f (1 << 19)
411
412 struct reloc_entry
413 {
414 char *name;
415 bfd_reloc_code_real_type reloc;
416 };
417
418 enum vfp_reg_pos
419 {
420 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
421 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
422 };
423
424 enum vfp_ldstm_type
425 {
426 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
427 };
428
429 /* Bits for DEFINED field in neon_typed_alias. */
430 #define NTA_HASTYPE 1
431 #define NTA_HASINDEX 2
432
433 struct neon_typed_alias
434 {
435 unsigned char defined;
436 unsigned char index;
437 struct neon_type_el eltype;
438 };
439
440 /* ARM register categories. This includes coprocessor numbers and various
441 architecture extensions' registers. */
442 enum arm_reg_type
443 {
444 REG_TYPE_RN,
445 REG_TYPE_CP,
446 REG_TYPE_CN,
447 REG_TYPE_FN,
448 REG_TYPE_VFS,
449 REG_TYPE_VFD,
450 REG_TYPE_NQ,
451 REG_TYPE_VFSD,
452 REG_TYPE_NDQ,
453 REG_TYPE_NSDQ,
454 REG_TYPE_VFC,
455 REG_TYPE_MVF,
456 REG_TYPE_MVD,
457 REG_TYPE_MVFX,
458 REG_TYPE_MVDX,
459 REG_TYPE_MVAX,
460 REG_TYPE_DSPSC,
461 REG_TYPE_MMXWR,
462 REG_TYPE_MMXWC,
463 REG_TYPE_MMXWCG,
464 REG_TYPE_XSCALE,
465 };
466
467 /* Structure for a hash table entry for a register.
468 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
469 information which states whether a vector type or index is specified (for a
470 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
471 struct reg_entry
472 {
473 const char *name;
474 unsigned char number;
475 unsigned char type;
476 unsigned char builtin;
477 struct neon_typed_alias *neon;
478 };
479
480 /* Diagnostics used when we don't get a register of the expected type. */
481 const char *const reg_expected_msgs[] =
482 {
483 N_("ARM register expected"),
484 N_("bad or missing co-processor number"),
485 N_("co-processor register expected"),
486 N_("FPA register expected"),
487 N_("VFP single precision register expected"),
488 N_("VFP/Neon double precision register expected"),
489 N_("Neon quad precision register expected"),
490 N_("VFP single or double precision register expected"),
491 N_("Neon double or quad precision register expected"),
492 N_("VFP single, double or Neon quad precision register expected"),
493 N_("VFP system register expected"),
494 N_("Maverick MVF register expected"),
495 N_("Maverick MVD register expected"),
496 N_("Maverick MVFX register expected"),
497 N_("Maverick MVDX register expected"),
498 N_("Maverick MVAX register expected"),
499 N_("Maverick DSPSC register expected"),
500 N_("iWMMXt data register expected"),
501 N_("iWMMXt control register expected"),
502 N_("iWMMXt scalar register expected"),
503 N_("XScale accumulator register expected"),
504 };
505
506 /* Some well known registers that we refer to directly elsewhere. */
507 #define REG_SP 13
508 #define REG_LR 14
509 #define REG_PC 15
510
511 /* ARM instructions take 4bytes in the object file, Thumb instructions
512 take 2: */
513 #define INSN_SIZE 4
514
515 struct asm_opcode
516 {
517 /* Basic string to match. */
518 const char *template;
519
520 /* Parameters to instruction. */
521 unsigned char operands[8];
522
523 /* Conditional tag - see opcode_lookup. */
524 unsigned int tag : 4;
525
526 /* Basic instruction code. */
527 unsigned int avalue : 28;
528
529 /* Thumb-format instruction code. */
530 unsigned int tvalue;
531
532 /* Which architecture variant provides this instruction. */
533 const arm_feature_set *avariant;
534 const arm_feature_set *tvariant;
535
536 /* Function to call to encode instruction in ARM format. */
537 void (* aencode) (void);
538
539 /* Function to call to encode instruction in Thumb format. */
540 void (* tencode) (void);
541 };
542
543 /* Defines for various bits that we will want to toggle. */
544 #define INST_IMMEDIATE 0x02000000
545 #define OFFSET_REG 0x02000000
546 #define HWOFFSET_IMM 0x00400000
547 #define SHIFT_BY_REG 0x00000010
548 #define PRE_INDEX 0x01000000
549 #define INDEX_UP 0x00800000
550 #define WRITE_BACK 0x00200000
551 #define LDM_TYPE_2_OR_3 0x00400000
552 #define CPSI_MMOD 0x00020000
553
554 #define LITERAL_MASK 0xf000f000
555 #define OPCODE_MASK 0xfe1fffff
556 #define V4_STR_BIT 0x00000020
557
558 #define T2_SUBS_PC_LR 0xf3de8f00
559
560 #define DATA_OP_SHIFT 21
561
562 #define T2_OPCODE_MASK 0xfe1fffff
563 #define T2_DATA_OP_SHIFT 21
564
565 /* Codes to distinguish the arithmetic instructions. */
566 #define OPCODE_AND 0
567 #define OPCODE_EOR 1
568 #define OPCODE_SUB 2
569 #define OPCODE_RSB 3
570 #define OPCODE_ADD 4
571 #define OPCODE_ADC 5
572 #define OPCODE_SBC 6
573 #define OPCODE_RSC 7
574 #define OPCODE_TST 8
575 #define OPCODE_TEQ 9
576 #define OPCODE_CMP 10
577 #define OPCODE_CMN 11
578 #define OPCODE_ORR 12
579 #define OPCODE_MOV 13
580 #define OPCODE_BIC 14
581 #define OPCODE_MVN 15
582
583 #define T2_OPCODE_AND 0
584 #define T2_OPCODE_BIC 1
585 #define T2_OPCODE_ORR 2
586 #define T2_OPCODE_ORN 3
587 #define T2_OPCODE_EOR 4
588 #define T2_OPCODE_ADD 8
589 #define T2_OPCODE_ADC 10
590 #define T2_OPCODE_SBC 11
591 #define T2_OPCODE_SUB 13
592 #define T2_OPCODE_RSB 14
593
594 #define T_OPCODE_MUL 0x4340
595 #define T_OPCODE_TST 0x4200
596 #define T_OPCODE_CMN 0x42c0
597 #define T_OPCODE_NEG 0x4240
598 #define T_OPCODE_MVN 0x43c0
599
600 #define T_OPCODE_ADD_R3 0x1800
601 #define T_OPCODE_SUB_R3 0x1a00
602 #define T_OPCODE_ADD_HI 0x4400
603 #define T_OPCODE_ADD_ST 0xb000
604 #define T_OPCODE_SUB_ST 0xb080
605 #define T_OPCODE_ADD_SP 0xa800
606 #define T_OPCODE_ADD_PC 0xa000
607 #define T_OPCODE_ADD_I8 0x3000
608 #define T_OPCODE_SUB_I8 0x3800
609 #define T_OPCODE_ADD_I3 0x1c00
610 #define T_OPCODE_SUB_I3 0x1e00
611
612 #define T_OPCODE_ASR_R 0x4100
613 #define T_OPCODE_LSL_R 0x4080
614 #define T_OPCODE_LSR_R 0x40c0
615 #define T_OPCODE_ROR_R 0x41c0
616 #define T_OPCODE_ASR_I 0x1000
617 #define T_OPCODE_LSL_I 0x0000
618 #define T_OPCODE_LSR_I 0x0800
619
620 #define T_OPCODE_MOV_I8 0x2000
621 #define T_OPCODE_CMP_I8 0x2800
622 #define T_OPCODE_CMP_LR 0x4280
623 #define T_OPCODE_MOV_HR 0x4600
624 #define T_OPCODE_CMP_HR 0x4500
625
626 #define T_OPCODE_LDR_PC 0x4800
627 #define T_OPCODE_LDR_SP 0x9800
628 #define T_OPCODE_STR_SP 0x9000
629 #define T_OPCODE_LDR_IW 0x6800
630 #define T_OPCODE_STR_IW 0x6000
631 #define T_OPCODE_LDR_IH 0x8800
632 #define T_OPCODE_STR_IH 0x8000
633 #define T_OPCODE_LDR_IB 0x7800
634 #define T_OPCODE_STR_IB 0x7000
635 #define T_OPCODE_LDR_RW 0x5800
636 #define T_OPCODE_STR_RW 0x5000
637 #define T_OPCODE_LDR_RH 0x5a00
638 #define T_OPCODE_STR_RH 0x5200
639 #define T_OPCODE_LDR_RB 0x5c00
640 #define T_OPCODE_STR_RB 0x5400
641
642 #define T_OPCODE_PUSH 0xb400
643 #define T_OPCODE_POP 0xbc00
644
645 #define T_OPCODE_BRANCH 0xe000
646
647 #define THUMB_SIZE 2 /* Size of thumb instruction. */
648 #define THUMB_PP_PC_LR 0x0100
649 #define THUMB_LOAD_BIT 0x0800
650 #define THUMB2_LOAD_BIT 0x00100000
651
652 #define BAD_ARGS _("bad arguments to instruction")
653 #define BAD_PC _("r15 not allowed here")
654 #define BAD_COND _("instruction cannot be conditional")
655 #define BAD_OVERLAP _("registers may not be the same")
656 #define BAD_HIREG _("lo register required")
657 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
 658 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
659 #define BAD_BRANCH _("branch must be last instruction in IT block")
660 #define BAD_NOT_IT _("instruction not allowed in IT block")
661 #define BAD_FPU _("selected FPU does not support instruction")
662
663 static struct hash_control *arm_ops_hsh;
664 static struct hash_control *arm_cond_hsh;
665 static struct hash_control *arm_shift_hsh;
666 static struct hash_control *arm_psr_hsh;
667 static struct hash_control *arm_v7m_psr_hsh;
668 static struct hash_control *arm_reg_hsh;
669 static struct hash_control *arm_reloc_hsh;
670 static struct hash_control *arm_barrier_opt_hsh;
671
672 /* Stuff needed to resolve the label ambiguity
673 As:
674 ...
675 label: <insn>
676 may differ from:
677 ...
678 label:
679 <insn>
680 */
681
682 symbolS * last_label_seen;
683 static int label_is_thumb_function_name = FALSE;
684 \f
685 /* Literal pool structure. Held on a per-section
686 and per-sub-section basis. */
687
688 #define MAX_LITERAL_POOL_SIZE 1024
689 typedef struct literal_pool
690 {
691 expressionS literals [MAX_LITERAL_POOL_SIZE];
692 unsigned int next_free_entry;
693 unsigned int id;
694 symbolS * symbol;
695 segT section;
696 subsegT sub_section;
697 struct literal_pool * next;
698 } literal_pool;
699
700 /* Pointer to a linked list of literal pools. */
701 literal_pool * list_of_pools = NULL;
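/* For example, "ldr r0, =0x12345678" adds the constant to the literal pool
   for the current (sub)section; the pool is emitted at the next ".ltorg"
   (or ".pool") directive, or when assembly of the section finishes, and the
   load becomes a pc-relative access to that pool entry.  */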
702
703 /* State variables for IT block handling. */
 704 static int current_it_mask = 0;
705 static int current_cc;
706
707 \f
708 /* Pure syntax. */
709
710 /* This array holds the chars that always start a comment. If the
711 pre-processor is disabled, these aren't very useful. */
712 const char comment_chars[] = "@";
713
714 /* This array holds the chars that only start a comment at the beginning of
715 a line. If the line seems to have the form '# 123 filename'
716 .line and .file directives will appear in the pre-processed output. */
717 /* Note that input_file.c hand checks for '#' at the beginning of the
718 first line of the input file. This is because the compiler outputs
719 #NO_APP at the beginning of its output. */
720 /* Also note that comments like this one will always work. */
721 const char line_comment_chars[] = "#";
722
723 const char line_separator_chars[] = ";";
724
 725 /* Chars that can be used to separate the mantissa
 726    from the exponent in floating point numbers.  */
727 const char EXP_CHARS[] = "eE";
728
729 /* Chars that mean this number is a floating point constant. */
730 /* As in 0f12.456 */
731 /* or 0d1.2345e12 */
732
733 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
734
735 /* Prefix characters that indicate the start of an immediate
736 value. */
737 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
738
739 /* Separator character handling. */
740
741 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
742
743 static inline int
744 skip_past_char (char ** str, char c)
745 {
746 if (**str == c)
747 {
748 (*str)++;
749 return SUCCESS;
750 }
751 else
752 return FAIL;
753 }
754 #define skip_past_comma(str) skip_past_char (str, ',')
755
756 /* Arithmetic expressions (possibly involving symbols). */
757
758 /* Return TRUE if anything in the expression is a bignum. */
759
760 static int
761 walk_no_bignums (symbolS * sp)
762 {
763 if (symbol_get_value_expression (sp)->X_op == O_big)
764 return 1;
765
766 if (symbol_get_value_expression (sp)->X_add_symbol)
767 {
768 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
769 || (symbol_get_value_expression (sp)->X_op_symbol
770 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
771 }
772
773 return 0;
774 }
775
776 static int in_my_get_expression = 0;
777
778 /* Third argument to my_get_expression. */
779 #define GE_NO_PREFIX 0
780 #define GE_IMM_PREFIX 1
781 #define GE_OPT_PREFIX 2
782 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
783 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
784 #define GE_OPT_PREFIX_BIG 3
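/* For example, operands that may be Neon VMOV/VMVN-style 64-bit immediates
   (such as "vmov.i64 d0, #0xff00ff00ff00ff00") are parsed with
   GE_OPT_PREFIX_BIG; ordinary immediate operands use GE_IMM_PREFIX, which
   my_get_expression below relaxes to GE_OPT_PREFIX in unified syntax.  */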
785
786 static int
787 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
788 {
789 char * save_in;
790 segT seg;
791
792 /* In unified syntax, all prefixes are optional. */
793 if (unified_syntax)
794 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
795 : GE_OPT_PREFIX;
796
797 switch (prefix_mode)
798 {
799 case GE_NO_PREFIX: break;
800 case GE_IMM_PREFIX:
801 if (!is_immediate_prefix (**str))
802 {
803 inst.error = _("immediate expression requires a # prefix");
804 return FAIL;
805 }
806 (*str)++;
807 break;
808 case GE_OPT_PREFIX:
809 case GE_OPT_PREFIX_BIG:
810 if (is_immediate_prefix (**str))
811 (*str)++;
812 break;
813 default: abort ();
814 }
815
816 memset (ep, 0, sizeof (expressionS));
817
818 save_in = input_line_pointer;
819 input_line_pointer = *str;
820 in_my_get_expression = 1;
821 seg = expression (ep);
822 in_my_get_expression = 0;
823
824 if (ep->X_op == O_illegal)
825 {
826 /* We found a bad expression in md_operand(). */
827 *str = input_line_pointer;
828 input_line_pointer = save_in;
829 if (inst.error == NULL)
830 inst.error = _("bad expression");
831 return 1;
832 }
833
834 #ifdef OBJ_AOUT
835 if (seg != absolute_section
836 && seg != text_section
837 && seg != data_section
838 && seg != bss_section
839 && seg != undefined_section)
840 {
841 inst.error = _("bad segment");
842 *str = input_line_pointer;
843 input_line_pointer = save_in;
844 return 1;
845 }
846 #endif
847
848 /* Get rid of any bignums now, so that we don't generate an error for which
849 we can't establish a line number later on. Big numbers are never valid
850 in instructions, which is where this routine is always called. */
851 if (prefix_mode != GE_OPT_PREFIX_BIG
852 && (ep->X_op == O_big
853 || (ep->X_add_symbol
854 && (walk_no_bignums (ep->X_add_symbol)
855 || (ep->X_op_symbol
856 && walk_no_bignums (ep->X_op_symbol))))))
857 {
858 inst.error = _("invalid constant");
859 *str = input_line_pointer;
860 input_line_pointer = save_in;
861 return 1;
862 }
863
864 *str = input_line_pointer;
865 input_line_pointer = save_in;
866 return 0;
867 }
868
869 /* Turn a string in input_line_pointer into a floating point constant
870 of type TYPE, and store the appropriate bytes in *LITP. The number
871 of LITTLENUMS emitted is stored in *SIZEP. An error message is
872 returned, or NULL on OK.
873
 874    Note that fp constants aren't represented in the normal way on the ARM.
875 In big endian mode, things are as expected. However, in little endian
876 mode fp constants are big-endian word-wise, and little-endian byte-wise
877 within the words. For example, (double) 1.1 in big endian mode is
878 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
879 the byte sequence 99 99 f1 3f 9a 99 99 99.
880
881 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
882
883 char *
884 md_atof (int type, char * litP, int * sizeP)
885 {
886 int prec;
887 LITTLENUM_TYPE words[MAX_LITTLENUMS];
888 char *t;
889 int i;
890
891 switch (type)
892 {
893 case 'f':
894 case 'F':
895 case 's':
896 case 'S':
897 prec = 2;
898 break;
899
900 case 'd':
901 case 'D':
902 case 'r':
903 case 'R':
904 prec = 4;
905 break;
906
907 case 'x':
908 case 'X':
909 prec = 6;
910 break;
911
912 case 'p':
913 case 'P':
914 prec = 6;
915 break;
916
917 default:
918 *sizeP = 0;
919 return _("bad call to MD_ATOF()");
920 }
921
922 t = atof_ieee (input_line_pointer, type, words);
923 if (t)
924 input_line_pointer = t;
925 *sizeP = prec * 2;
926
927 if (target_big_endian)
928 {
929 for (i = 0; i < prec; i++)
930 {
931 md_number_to_chars (litP, (valueT) words[i], 2);
932 litP += 2;
933 }
934 }
935 else
936 {
937 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
938 for (i = prec - 1; i >= 0; i--)
939 {
940 md_number_to_chars (litP, (valueT) words[i], 2);
941 litP += 2;
942 }
943 else
944 /* For a 4 byte float the order of elements in `words' is 1 0.
945 For an 8 byte float the order is 1 0 3 2. */
946 for (i = 0; i < prec; i += 2)
947 {
948 md_number_to_chars (litP, (valueT) words[i + 1], 2);
949 md_number_to_chars (litP + 2, (valueT) words[i], 2);
950 litP += 4;
951 }
952 }
953
954 return 0;
955 }
956
957 /* We handle all bad expressions here, so that we can report the faulty
958 instruction in the error message. */
959 void
960 md_operand (expressionS * expr)
961 {
962 if (in_my_get_expression)
963 expr->X_op = O_illegal;
964 }
965
966 /* Immediate values. */
967
968 /* Generic immediate-value read function for use in directives.
969 Accepts anything that 'expression' can fold to a constant.
970 *val receives the number. */
971 #ifdef OBJ_ELF
972 static int
973 immediate_for_directive (int *val)
974 {
975 expressionS exp;
976 exp.X_op = O_illegal;
977
978 if (is_immediate_prefix (*input_line_pointer))
979 {
980 input_line_pointer++;
981 expression (&exp);
982 }
983
984 if (exp.X_op != O_constant)
985 {
986 as_bad (_("expected #constant"));
987 ignore_rest_of_line ();
988 return FAIL;
989 }
990 *val = exp.X_add_number;
991 return SUCCESS;
992 }
993 #endif
994
995 /* Register parsing. */
996
997 /* Generic register parser. CCP points to what should be the
998 beginning of a register name. If it is indeed a valid register
999 name, advance CCP over it and return the reg_entry structure;
1000 otherwise return NULL. Does not issue diagnostics. */
1001
1002 static struct reg_entry *
1003 arm_reg_parse_multi (char **ccp)
1004 {
1005 char *start = *ccp;
1006 char *p;
1007 struct reg_entry *reg;
1008
1009 #ifdef REGISTER_PREFIX
1010 if (*start != REGISTER_PREFIX)
1011 return NULL;
1012 start++;
1013 #endif
1014 #ifdef OPTIONAL_REGISTER_PREFIX
1015 if (*start == OPTIONAL_REGISTER_PREFIX)
1016 start++;
1017 #endif
1018
1019 p = start;
1020 if (!ISALPHA (*p) || !is_name_beginner (*p))
1021 return NULL;
1022
1023 do
1024 p++;
1025 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1026
1027 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1028
1029 if (!reg)
1030 return NULL;
1031
1032 *ccp = p;
1033 return reg;
1034 }
1035
1036 static int
1037 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1038 enum arm_reg_type type)
1039 {
1040 /* Alternative syntaxes are accepted for a few register classes. */
1041 switch (type)
1042 {
1043 case REG_TYPE_MVF:
1044 case REG_TYPE_MVD:
1045 case REG_TYPE_MVFX:
1046 case REG_TYPE_MVDX:
1047 /* Generic coprocessor register names are allowed for these. */
1048 if (reg && reg->type == REG_TYPE_CN)
1049 return reg->number;
1050 break;
1051
1052 case REG_TYPE_CP:
1053 /* For backward compatibility, a bare number is valid here. */
1054 {
1055 unsigned long processor = strtoul (start, ccp, 10);
1056 if (*ccp != start && processor <= 15)
1057 return processor;
1058 }
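      /* Fall through.  */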
1059
1060 case REG_TYPE_MMXWC:
1061 /* WC includes WCG. ??? I'm not sure this is true for all
1062 instructions that take WC registers. */
1063 if (reg && reg->type == REG_TYPE_MMXWCG)
1064 return reg->number;
1065 break;
1066
1067 default:
1068 break;
1069 }
1070
1071 return FAIL;
1072 }
1073
1074 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1075 return value is the register number or FAIL. */
1076
1077 static int
1078 arm_reg_parse (char **ccp, enum arm_reg_type type)
1079 {
1080 char *start = *ccp;
1081 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1082 int ret;
1083
1084 /* Do not allow a scalar (reg+index) to parse as a register. */
1085 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1086 return FAIL;
1087
1088 if (reg && reg->type == type)
1089 return reg->number;
1090
1091 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1092 return ret;
1093
1094 *ccp = start;
1095 return FAIL;
1096 }
1097
1098 /* Parse a Neon type specifier. *STR should point at the leading '.'
1099 character. Does no verification at this stage that the type fits the opcode
1100 properly. E.g.,
1101
1102 .i32.i32.s16
1103 .s32.f32
1104 .u16
1105
1106 Can all be legally parsed by this function.
1107
1108 Fills in neon_type struct pointer with parsed information, and updates STR
1109 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1110 type, FAIL if not. */
1111
1112 static int
1113 parse_neon_type (struct neon_type *type, char **str)
1114 {
1115 char *ptr = *str;
1116
1117 if (type)
1118 type->elems = 0;
1119
1120 while (type->elems < NEON_MAX_TYPE_ELS)
1121 {
1122 enum neon_el_type thistype = NT_untyped;
1123 unsigned thissize = -1u;
1124
1125 if (*ptr != '.')
1126 break;
1127
1128 ptr++;
1129
1130 /* Just a size without an explicit type. */
1131 if (ISDIGIT (*ptr))
1132 goto parsesize;
1133
1134 switch (TOLOWER (*ptr))
1135 {
1136 case 'i': thistype = NT_integer; break;
1137 case 'f': thistype = NT_float; break;
1138 case 'p': thistype = NT_poly; break;
1139 case 's': thistype = NT_signed; break;
1140 case 'u': thistype = NT_unsigned; break;
1141 case 'd':
1142 thistype = NT_float;
1143 thissize = 64;
1144 ptr++;
1145 goto done;
1146 default:
1147 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1148 return FAIL;
1149 }
1150
1151 ptr++;
1152
1153 /* .f is an abbreviation for .f32. */
1154 if (thistype == NT_float && !ISDIGIT (*ptr))
1155 thissize = 32;
1156 else
1157 {
1158 parsesize:
1159 thissize = strtoul (ptr, &ptr, 10);
1160
1161 if (thissize != 8 && thissize != 16 && thissize != 32
1162 && thissize != 64)
1163 {
1164 as_bad (_("bad size %d in type specifier"), thissize);
1165 return FAIL;
1166 }
1167 }
1168
1169 done:
1170 if (type)
1171 {
1172 type->el[type->elems].type = thistype;
1173 type->el[type->elems].size = thissize;
1174 type->elems++;
1175 }
1176 }
1177
1178 /* Empty/missing type is not a successful parse. */
1179 if (type->elems == 0)
1180 return FAIL;
1181
1182 *str = ptr;
1183
1184 return SUCCESS;
1185 }
1186
1187 /* Errors may be set multiple times during parsing or bit encoding
1188 (particularly in the Neon bits), but usually the earliest error which is set
1189 will be the most meaningful. Avoid overwriting it with later (cascading)
1190 errors by calling this function. */
1191
1192 static void
1193 first_error (const char *err)
1194 {
1195 if (!inst.error)
1196 inst.error = err;
1197 }
1198
1199 /* Parse a single type, e.g. ".s32", leading period included. */
1200 static int
1201 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1202 {
1203 char *str = *ccp;
1204 struct neon_type optype;
1205
1206 if (*str == '.')
1207 {
1208 if (parse_neon_type (&optype, &str) == SUCCESS)
1209 {
1210 if (optype.elems == 1)
1211 *vectype = optype.el[0];
1212 else
1213 {
1214 first_error (_("only one type should be specified for operand"));
1215 return FAIL;
1216 }
1217 }
1218 else
1219 {
1220 first_error (_("vector type expected"));
1221 return FAIL;
1222 }
1223 }
1224 else
1225 return FAIL;
1226
1227 *ccp = str;
1228
1229 return SUCCESS;
1230 }
1231
1232 /* Special meanings for indices (which normally have a range of 0-7); these
1233    special values still fit in a 4-bit integer.  */
1234
1235 #define NEON_ALL_LANES 15
1236 #define NEON_INTERLEAVE_LANES 14
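/* For example, a scalar written "d2[]" (as in "vld1.32 {d2[]}, [r0]") parses
   with index NEON_ALL_LANES, while a plain "d2" appearing in an element list
   is recorded as NEON_INTERLEAVE_LANES.  */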
1237
1238 /* Parse either a register or a scalar, with an optional type. Return the
1239 register number, and optionally fill in the actual type of the register
1240 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1241 type/index information in *TYPEINFO. */
1242
1243 static int
1244 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1245 enum arm_reg_type *rtype,
1246 struct neon_typed_alias *typeinfo)
1247 {
1248 char *str = *ccp;
1249 struct reg_entry *reg = arm_reg_parse_multi (&str);
1250 struct neon_typed_alias atype;
1251 struct neon_type_el parsetype;
1252
1253 atype.defined = 0;
1254 atype.index = -1;
1255 atype.eltype.type = NT_invtype;
1256 atype.eltype.size = -1;
1257
1258 /* Try alternate syntax for some types of register. Note these are mutually
1259 exclusive with the Neon syntax extensions. */
1260 if (reg == NULL)
1261 {
1262 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1263 if (altreg != FAIL)
1264 *ccp = str;
1265 if (typeinfo)
1266 *typeinfo = atype;
1267 return altreg;
1268 }
1269
1270 /* Undo polymorphism when a set of register types may be accepted. */
1271 if ((type == REG_TYPE_NDQ
1272 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1273 || (type == REG_TYPE_VFSD
1274 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1275 || (type == REG_TYPE_NSDQ
1276 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1277 || reg->type == REG_TYPE_NQ))
1278 || (type == REG_TYPE_MMXWC
1279 && (reg->type == REG_TYPE_MMXWCG)))
1280 type = reg->type;
1281
1282 if (type != reg->type)
1283 return FAIL;
1284
1285 if (reg->neon)
1286 atype = *reg->neon;
1287
1288 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1289 {
1290 if ((atype.defined & NTA_HASTYPE) != 0)
1291 {
1292 first_error (_("can't redefine type for operand"));
1293 return FAIL;
1294 }
1295 atype.defined |= NTA_HASTYPE;
1296 atype.eltype = parsetype;
1297 }
1298
1299 if (skip_past_char (&str, '[') == SUCCESS)
1300 {
1301 if (type != REG_TYPE_VFD)
1302 {
1303 first_error (_("only D registers may be indexed"));
1304 return FAIL;
1305 }
1306
1307 if ((atype.defined & NTA_HASINDEX) != 0)
1308 {
1309 first_error (_("can't change index for operand"));
1310 return FAIL;
1311 }
1312
1313 atype.defined |= NTA_HASINDEX;
1314
1315 if (skip_past_char (&str, ']') == SUCCESS)
1316 atype.index = NEON_ALL_LANES;
1317 else
1318 {
1319 expressionS exp;
1320
1321 my_get_expression (&exp, &str, GE_NO_PREFIX);
1322
1323 if (exp.X_op != O_constant)
1324 {
1325 first_error (_("constant expression required"));
1326 return FAIL;
1327 }
1328
1329 if (skip_past_char (&str, ']') == FAIL)
1330 return FAIL;
1331
1332 atype.index = exp.X_add_number;
1333 }
1334 }
1335
1336 if (typeinfo)
1337 *typeinfo = atype;
1338
1339 if (rtype)
1340 *rtype = type;
1341
1342 *ccp = str;
1343
1344 return reg->number;
1345 }
1346
1347 /* Like arm_reg_parse, but allow the following extra features:
1348 - If RTYPE is non-zero, return the (possibly restricted) type of the
1349 register (e.g. Neon double or quad reg when either has been requested).
1350 - If this is a Neon vector type with additional type information, fill
1351 in the struct pointed to by VECTYPE (if non-NULL).
1352 This function will fault on encountering a scalar.
1353 */
1354
1355 static int
1356 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1357 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1358 {
1359 struct neon_typed_alias atype;
1360 char *str = *ccp;
1361 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1362
1363 if (reg == FAIL)
1364 return FAIL;
1365
1366 /* Do not allow a scalar (reg+index) to parse as a register. */
1367 if ((atype.defined & NTA_HASINDEX) != 0)
1368 {
1369 first_error (_("register operand expected, but got scalar"));
1370 return FAIL;
1371 }
1372
1373 if (vectype)
1374 *vectype = atype.eltype;
1375
1376 *ccp = str;
1377
1378 return reg;
1379 }
1380
1381 #define NEON_SCALAR_REG(X) ((X) >> 4)
1382 #define NEON_SCALAR_INDEX(X) ((X) & 15)
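/* For example, parse_scalar below encodes "d5[2]" as 5 * 16 + 2 == 82, so
   NEON_SCALAR_REG (82) == 5 and NEON_SCALAR_INDEX (82) == 2.  */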
1383
1384 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1385 have enough information to be able to do a good job bounds-checking. So, we
1386 just do easy checks here, and do further checks later. */
1387
1388 static int
1389 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1390 {
1391 int reg;
1392 char *str = *ccp;
1393 struct neon_typed_alias atype;
1394
1395 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1396
1397 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1398 return FAIL;
1399
1400 if (atype.index == NEON_ALL_LANES)
1401 {
1402 first_error (_("scalar must have an index"));
1403 return FAIL;
1404 }
1405 else if (atype.index >= 64 / elsize)
1406 {
1407 first_error (_("scalar index out of range"));
1408 return FAIL;
1409 }
1410
1411 if (type)
1412 *type = atype.eltype;
1413
1414 *ccp = str;
1415
1416 return reg * 16 + atype.index;
1417 }
1418
1419 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
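/* For example, "{r0, r2-r4}" yields the mask 0x0000001d (bits 0, 2, 3 and 4);
   several ranges may be joined with '|' or '+', and a bare constant such as
   "0x0003" is also accepted as a mask.  */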
1420 static long
1421 parse_reg_list (char ** strp)
1422 {
1423 char * str = * strp;
1424 long range = 0;
1425 int another_range;
1426
1427 /* We come back here if we get ranges concatenated by '+' or '|'. */
1428 do
1429 {
1430 another_range = 0;
1431
1432 if (*str == '{')
1433 {
1434 int in_range = 0;
1435 int cur_reg = -1;
1436
1437 str++;
1438 do
1439 {
1440 int reg;
1441
1442 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1443 {
1444 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1445 return FAIL;
1446 }
1447
1448 if (in_range)
1449 {
1450 int i;
1451
1452 if (reg <= cur_reg)
1453 {
1454 first_error (_("bad range in register list"));
1455 return FAIL;
1456 }
1457
1458 for (i = cur_reg + 1; i < reg; i++)
1459 {
1460 if (range & (1 << i))
1461 as_tsktsk
1462 (_("Warning: duplicated register (r%d) in register list"),
1463 i);
1464 else
1465 range |= 1 << i;
1466 }
1467 in_range = 0;
1468 }
1469
1470 if (range & (1 << reg))
1471 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1472 reg);
1473 else if (reg <= cur_reg)
1474 as_tsktsk (_("Warning: register range not in ascending order"));
1475
1476 range |= 1 << reg;
1477 cur_reg = reg;
1478 }
1479 while (skip_past_comma (&str) != FAIL
1480 || (in_range = 1, *str++ == '-'));
1481 str--;
1482
1483 if (*str++ != '}')
1484 {
1485 first_error (_("missing `}'"));
1486 return FAIL;
1487 }
1488 }
1489 else
1490 {
1491 expressionS expr;
1492
1493 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1494 return FAIL;
1495
1496 if (expr.X_op == O_constant)
1497 {
1498 if (expr.X_add_number
1499 != (expr.X_add_number & 0x0000ffff))
1500 {
1501 inst.error = _("invalid register mask");
1502 return FAIL;
1503 }
1504
1505 if ((range & expr.X_add_number) != 0)
1506 {
1507 int regno = range & expr.X_add_number;
1508
1509 regno &= -regno;
1510 regno = (1 << regno) - 1;
1511 as_tsktsk
1512 (_("Warning: duplicated register (r%d) in register list"),
1513 regno);
1514 }
1515
1516 range |= expr.X_add_number;
1517 }
1518 else
1519 {
1520 if (inst.reloc.type != 0)
1521 {
1522 inst.error = _("expression too complex");
1523 return FAIL;
1524 }
1525
1526 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1527 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1528 inst.reloc.pc_rel = 0;
1529 }
1530 }
1531
1532 if (*str == '|' || *str == '+')
1533 {
1534 str++;
1535 another_range = 1;
1536 }
1537 }
1538 while (another_range);
1539
1540 *strp = str;
1541 return range;
1542 }
1543
1544 /* Types of registers in a list. */
1545
1546 enum reg_list_els
1547 {
1548 REGLIST_VFP_S,
1549 REGLIST_VFP_D,
1550 REGLIST_NEON_D
1551 };
1552
1553 /* Parse a VFP register list. If the string is invalid return FAIL.
1554 Otherwise return the number of registers, and set PBASE to the first
1555 register. Parses registers of type ETYPE.
1556 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1557 - Q registers can be used to specify pairs of D registers
1558 - { } can be omitted from around a singleton register list
1559 FIXME: This is not implemented, as it would require backtracking in
1560 some cases, e.g.:
1561 vtbl.8 d3,d4,d5
1562 This could be done (the meaning isn't really ambiguous), but doesn't
1563 fit in well with the current parsing framework.
1564 - 32 D registers may be used (also true for VFPv3).
1565 FIXME: Types are ignored in these register lists, which is probably a
1566 bug. */
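/* For example, "{d0-d3}" returns 4 with *PBASE set to 0; with REGLIST_NEON_D,
   "{q1}" is accepted and counts as the two D registers d2-d3, returning 2
   with *PBASE set to 2.  */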
1567
1568 static int
1569 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1570 {
1571 char *str = *ccp;
1572 int base_reg;
1573 int new_base;
1574 enum arm_reg_type regtype = 0;
1575 int max_regs = 0;
1576 int count = 0;
1577 int warned = 0;
1578 unsigned long mask = 0;
1579 int i;
1580
1581 if (*str != '{')
1582 {
1583 inst.error = _("expecting {");
1584 return FAIL;
1585 }
1586
1587 str++;
1588
1589 switch (etype)
1590 {
1591 case REGLIST_VFP_S:
1592 regtype = REG_TYPE_VFS;
1593 max_regs = 32;
1594 break;
1595
1596 case REGLIST_VFP_D:
1597 regtype = REG_TYPE_VFD;
1598 break;
1599
1600 case REGLIST_NEON_D:
1601 regtype = REG_TYPE_NDQ;
1602 break;
1603 }
1604
1605 if (etype != REGLIST_VFP_S)
1606 {
1607 /* VFPv3 allows 32 D registers. */
1608 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1609 {
1610 max_regs = 32;
1611 if (thumb_mode)
1612 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1613 fpu_vfp_ext_v3);
1614 else
1615 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1616 fpu_vfp_ext_v3);
1617 }
1618 else
1619 max_regs = 16;
1620 }
1621
1622 base_reg = max_regs;
1623
1624 do
1625 {
1626 int setmask = 1, addregs = 1;
1627
1628 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1629
1630 if (new_base == FAIL)
1631 {
1632 first_error (_(reg_expected_msgs[regtype]));
1633 return FAIL;
1634 }
1635
1636 if (new_base >= max_regs)
1637 {
1638 first_error (_("register out of range in list"));
1639 return FAIL;
1640 }
1641
1642 /* Note: a value of 2 * n is returned for the register Q<n>. */
1643 if (regtype == REG_TYPE_NQ)
1644 {
1645 setmask = 3;
1646 addregs = 2;
1647 }
1648
1649 if (new_base < base_reg)
1650 base_reg = new_base;
1651
1652 if (mask & (setmask << new_base))
1653 {
1654 first_error (_("invalid register list"));
1655 return FAIL;
1656 }
1657
1658 if ((mask >> new_base) != 0 && ! warned)
1659 {
1660 as_tsktsk (_("register list not in ascending order"));
1661 warned = 1;
1662 }
1663
1664 mask |= setmask << new_base;
1665 count += addregs;
1666
1667 if (*str == '-') /* We have the start of a range expression */
1668 {
1669 int high_range;
1670
1671 str++;
1672
1673 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1674 == FAIL)
1675 {
1676 inst.error = gettext (reg_expected_msgs[regtype]);
1677 return FAIL;
1678 }
1679
1680 if (high_range >= max_regs)
1681 {
1682 first_error (_("register out of range in list"));
1683 return FAIL;
1684 }
1685
1686 if (regtype == REG_TYPE_NQ)
1687 high_range = high_range + 1;
1688
1689 if (high_range <= new_base)
1690 {
1691 inst.error = _("register range not in ascending order");
1692 return FAIL;
1693 }
1694
1695 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1696 {
1697 if (mask & (setmask << new_base))
1698 {
1699 inst.error = _("invalid register list");
1700 return FAIL;
1701 }
1702
1703 mask |= setmask << new_base;
1704 count += addregs;
1705 }
1706 }
1707 }
1708 while (skip_past_comma (&str) != FAIL);
1709
1710 str++;
1711
1712 /* Sanity check -- should have raised a parse error above. */
1713 if (count == 0 || count > max_regs)
1714 abort ();
1715
1716 *pbase = base_reg;
1717
1718 /* Final test -- the registers must be consecutive. */
1719 mask >>= base_reg;
1720 for (i = 0; i < count; i++)
1721 {
1722 if ((mask & (1u << i)) == 0)
1723 {
1724 inst.error = _("non-contiguous register range");
1725 return FAIL;
1726 }
1727 }
1728
1729 *ccp = str;
1730
1731 return count;
1732 }
1733
1734 /* True if two alias types are the same. */
1735
1736 static int
1737 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1738 {
1739 if (!a && !b)
1740 return 1;
1741
1742 if (!a || !b)
1743 return 0;
1744
1745 if (a->defined != b->defined)
1746 return 0;
1747
1748 if ((a->defined & NTA_HASTYPE) != 0
1749 && (a->eltype.type != b->eltype.type
1750 || a->eltype.size != b->eltype.size))
1751 return 0;
1752
1753 if ((a->defined & NTA_HASINDEX) != 0
1754 && (a->index != b->index))
1755 return 0;
1756
1757 return 1;
1758 }
1759
1760 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1761 The base register is put in *PBASE.
1762 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1763 the return value.
1764 The register stride (minus one) is put in bit 4 of the return value.
1765 Bits [6:5] encode the list length (minus one).
1766 The type of the list elements is put in *ELTYPE, if non-NULL. */
1767
1768 #define NEON_LANE(X) ((X) & 0xf)
1769 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1770 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
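/* For example, "{d0[2], d1[2]}" yields lane 2, stride 1 and length 2, i.e.
   a return value of 2 | (0 << 4) | (1 << 5) == 0x22, while "{d0, d2, d4, d6}"
   yields NEON_INTERLEAVE_LANES with stride 2 and length 4.  */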
1771
1772 static int
1773 parse_neon_el_struct_list (char **str, unsigned *pbase,
1774 struct neon_type_el *eltype)
1775 {
1776 char *ptr = *str;
1777 int base_reg = -1;
1778 int reg_incr = -1;
1779 int count = 0;
1780 int lane = -1;
1781 int leading_brace = 0;
1782 enum arm_reg_type rtype = REG_TYPE_NDQ;
1783 int addregs = 1;
1784 const char *const incr_error = "register stride must be 1 or 2";
1785 const char *const type_error = "mismatched element/structure types in list";
1786 struct neon_typed_alias firsttype;
1787
1788 if (skip_past_char (&ptr, '{') == SUCCESS)
1789 leading_brace = 1;
1790
1791 do
1792 {
1793 struct neon_typed_alias atype;
1794 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1795
1796 if (getreg == FAIL)
1797 {
1798 first_error (_(reg_expected_msgs[rtype]));
1799 return FAIL;
1800 }
1801
1802 if (base_reg == -1)
1803 {
1804 base_reg = getreg;
1805 if (rtype == REG_TYPE_NQ)
1806 {
1807 reg_incr = 1;
1808 addregs = 2;
1809 }
1810 firsttype = atype;
1811 }
1812 else if (reg_incr == -1)
1813 {
1814 reg_incr = getreg - base_reg;
1815 if (reg_incr < 1 || reg_incr > 2)
1816 {
1817 first_error (_(incr_error));
1818 return FAIL;
1819 }
1820 }
1821 else if (getreg != base_reg + reg_incr * count)
1822 {
1823 first_error (_(incr_error));
1824 return FAIL;
1825 }
1826
1827 if (!neon_alias_types_same (&atype, &firsttype))
1828 {
1829 first_error (_(type_error));
1830 return FAIL;
1831 }
1832
1833 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1834 modes. */
1835 if (ptr[0] == '-')
1836 {
1837 struct neon_typed_alias htype;
1838 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1839 if (lane == -1)
1840 lane = NEON_INTERLEAVE_LANES;
1841 else if (lane != NEON_INTERLEAVE_LANES)
1842 {
1843 first_error (_(type_error));
1844 return FAIL;
1845 }
1846 if (reg_incr == -1)
1847 reg_incr = 1;
1848 else if (reg_incr != 1)
1849 {
1850 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1851 return FAIL;
1852 }
1853 ptr++;
1854 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1855 if (hireg == FAIL)
1856 {
1857 first_error (_(reg_expected_msgs[rtype]));
1858 return FAIL;
1859 }
1860 if (!neon_alias_types_same (&htype, &firsttype))
1861 {
1862 first_error (_(type_error));
1863 return FAIL;
1864 }
1865 count += hireg + dregs - getreg;
1866 continue;
1867 }
1868
1869 /* If we're using Q registers, we can't use [] or [n] syntax. */
1870 if (rtype == REG_TYPE_NQ)
1871 {
1872 count += 2;
1873 continue;
1874 }
1875
1876 if ((atype.defined & NTA_HASINDEX) != 0)
1877 {
1878 if (lane == -1)
1879 lane = atype.index;
1880 else if (lane != atype.index)
1881 {
1882 first_error (_(type_error));
1883 return FAIL;
1884 }
1885 }
1886 else if (lane == -1)
1887 lane = NEON_INTERLEAVE_LANES;
1888 else if (lane != NEON_INTERLEAVE_LANES)
1889 {
1890 first_error (_(type_error));
1891 return FAIL;
1892 }
1893 count++;
1894 }
1895 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1896
1897 /* No lane set by [x]. We must be interleaving structures. */
1898 if (lane == -1)
1899 lane = NEON_INTERLEAVE_LANES;
1900
1901 /* Sanity check. */
1902 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1903 || (count > 1 && reg_incr == -1))
1904 {
1905 first_error (_("error parsing element/structure list"));
1906 return FAIL;
1907 }
1908
1909 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1910 {
1911 first_error (_("expected }"));
1912 return FAIL;
1913 }
1914
1915 if (reg_incr == -1)
1916 reg_incr = 1;
1917
1918 if (eltype)
1919 *eltype = firsttype.eltype;
1920
1921 *pbase = base_reg;
1922 *str = ptr;
1923
1924 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1925 }
1926
1927 /* Parse an explicit relocation suffix on an expression. This is
1928 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1929 arm_reloc_hsh contains no entries, so this function can only
1930 succeed if there is no () after the word. Returns -1 on error,
1931 BFD_RELOC_UNUSED if there wasn't any suffix. */
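/* For example, on ELF targets a data directive such as ".word sym(GOT)"
   reaches this point with *STR at "(GOT)", and the word inside the
   parentheses is looked up in arm_reloc_hsh.  */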
1932 static int
1933 parse_reloc (char **str)
1934 {
1935 struct reloc_entry *r;
1936 char *p, *q;
1937
1938 if (**str != '(')
1939 return BFD_RELOC_UNUSED;
1940
1941 p = *str + 1;
1942 q = p;
1943
1944 while (*q && *q != ')' && *q != ',')
1945 q++;
1946 if (*q != ')')
1947 return -1;
1948
1949 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1950 return -1;
1951
1952 *str = q + 1;
1953 return r->reloc;
1954 }
1955
1956 /* Directives: register aliases. */
1957
1958 static struct reg_entry *
1959 insert_reg_alias (char *str, int number, int type)
1960 {
1961 struct reg_entry *new;
1962 const char *name;
1963
1964 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1965 {
1966 if (new->builtin)
1967 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1968
1969 /* Only warn about a redefinition if it's not defined as the
1970 same register. */
1971 else if (new->number != number || new->type != type)
1972 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1973
1974 return 0;
1975 }
1976
1977 name = xstrdup (str);
1978 new = xmalloc (sizeof (struct reg_entry));
1979
1980 new->name = name;
1981 new->number = number;
1982 new->type = type;
1983 new->builtin = FALSE;
1984 new->neon = NULL;
1985
1986 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1987 abort ();
1988
1989 return new;
1990 }
1991
1992 static void
1993 insert_neon_reg_alias (char *str, int number, int type,
1994 struct neon_typed_alias *atype)
1995 {
1996 struct reg_entry *reg = insert_reg_alias (str, number, type);
1997
1998 if (!reg)
1999 {
2000 first_error (_("attempt to redefine typed alias"));
2001 return;
2002 }
2003
2004 if (atype)
2005 {
2006 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2007 *reg->neon = *atype;
2008 }
2009 }
2010
2011 /* Look for the .req directive. This is of the form:
2012
2013 new_register_name .req existing_register_name
2014
2015 If we find one, or if it looks sufficiently like one that we want to
2016 handle any error here, return non-zero. Otherwise return zero. */
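/* For example:

       acc .req r4

   makes "acc" usable wherever r4 is accepted; the code below also registers
   all-uppercase and all-lowercase spellings of the new name.  */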
2017
2018 static int
2019 create_register_alias (char * newname, char *p)
2020 {
2021 struct reg_entry *old;
2022 char *oldname, *nbuf;
2023 size_t nlen;
2024
2025 /* The input scrubber ensures that whitespace after the mnemonic is
2026 collapsed to single spaces. */
2027 oldname = p;
2028 if (strncmp (oldname, " .req ", 6) != 0)
2029 return 0;
2030
2031 oldname += 6;
2032 if (*oldname == '\0')
2033 return 0;
2034
2035 old = hash_find (arm_reg_hsh, oldname);
2036 if (!old)
2037 {
2038 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2039 return 1;
2040 }
2041
2042 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2043 the desired alias name, and p points to its end. If not, then
2044 the desired alias name is in the global original_case_string. */
2045 #ifdef TC_CASE_SENSITIVE
2046 nlen = p - newname;
2047 #else
2048 newname = original_case_string;
2049 nlen = strlen (newname);
2050 #endif
2051
2052 nbuf = alloca (nlen + 1);
2053 memcpy (nbuf, newname, nlen);
2054 nbuf[nlen] = '\0';
2055
2056 /* Create aliases under the new name as stated; an all-lowercase
2057 version of the new name; and an all-uppercase version of the new
2058 name. */
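  /* For example (illustrative), "Loop_count .req r2" makes "Loop_count",
     "LOOP_COUNT" and "loop_count" all usable as aliases for r2.  */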
2059 insert_reg_alias (nbuf, old->number, old->type);
2060
2061 for (p = nbuf; *p; p++)
2062 *p = TOUPPER (*p);
2063
2064 if (strncmp (nbuf, newname, nlen))
2065 insert_reg_alias (nbuf, old->number, old->type);
2066
2067 for (p = nbuf; *p; p++)
2068 *p = TOLOWER (*p);
2069
2070 if (strncmp (nbuf, newname, nlen))
2071 insert_reg_alias (nbuf, old->number, old->type);
2072
2073 return 1;
2074 }
2075
2076 /* Create a Neon typed/indexed register alias using directives, e.g.:
2077 X .dn d5.s32[1]
2078 Y .qn 6.s16
2079 Z .dn d7
2080 T .dn Z[0]
2081 These typed registers can be used instead of the types specified after the
2082 Neon mnemonic, so long as all operands given have types. Types can also be
2083 specified directly, e.g.:
2084 vadd d0.s32, d1.s32, d2.s32
2085 */
2086
2087 static int
2088 create_neon_reg_alias (char *newname, char *p)
2089 {
2090 enum arm_reg_type basetype;
2091 struct reg_entry *basereg;
2092 struct reg_entry mybasereg;
2093 struct neon_type ntype;
2094 struct neon_typed_alias typeinfo;
2095 char *namebuf, *nameend;
2096 int namelen;
2097
2098 typeinfo.defined = 0;
2099 typeinfo.eltype.type = NT_invtype;
2100 typeinfo.eltype.size = -1;
2101 typeinfo.index = -1;
2102
2103 nameend = p;
2104
2105 if (strncmp (p, " .dn ", 5) == 0)
2106 basetype = REG_TYPE_VFD;
2107 else if (strncmp (p, " .qn ", 5) == 0)
2108 basetype = REG_TYPE_NQ;
2109 else
2110 return 0;
2111
2112 p += 5;
2113
2114 if (*p == '\0')
2115 return 0;
2116
2117 basereg = arm_reg_parse_multi (&p);
2118
2119 if (basereg && basereg->type != basetype)
2120 {
2121 as_bad (_("bad type for register"));
2122 return 0;
2123 }
2124
2125 if (basereg == NULL)
2126 {
2127 expressionS exp;
2128 /* Try parsing as an integer. */
2129 my_get_expression (&exp, &p, GE_NO_PREFIX);
2130 if (exp.X_op != O_constant)
2131 {
2132 as_bad (_("expression must be constant"));
2133 return 0;
2134 }
2135 basereg = &mybasereg;
2136 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2137 : exp.X_add_number;
2138 basereg->neon = 0;
2139 }
2140
2141 if (basereg->neon)
2142 typeinfo = *basereg->neon;
2143
2144 if (parse_neon_type (&ntype, &p) == SUCCESS)
2145 {
2146 /* We got a type. */
2147 if (typeinfo.defined & NTA_HASTYPE)
2148 {
2149 as_bad (_("can't redefine the type of a register alias"));
2150 return 0;
2151 }
2152
2153 typeinfo.defined |= NTA_HASTYPE;
2154 if (ntype.elems != 1)
2155 {
2156 as_bad (_("you must specify a single type only"));
2157 return 0;
2158 }
2159 typeinfo.eltype = ntype.el[0];
2160 }
2161
2162 if (skip_past_char (&p, '[') == SUCCESS)
2163 {
2164 expressionS exp;
2165 /* We got a scalar index. */
2166
2167 if (typeinfo.defined & NTA_HASINDEX)
2168 {
2169 as_bad (_("can't redefine the index of a scalar alias"));
2170 return 0;
2171 }
2172
2173 my_get_expression (&exp, &p, GE_NO_PREFIX);
2174
2175 if (exp.X_op != O_constant)
2176 {
2177 as_bad (_("scalar index must be constant"));
2178 return 0;
2179 }
2180
2181 typeinfo.defined |= NTA_HASINDEX;
2182 typeinfo.index = exp.X_add_number;
2183
2184 if (skip_past_char (&p, ']') == FAIL)
2185 {
2186 as_bad (_("expecting ]"));
2187 return 0;
2188 }
2189 }
2190
2191 namelen = nameend - newname;
2192 namebuf = alloca (namelen + 1);
2193 strncpy (namebuf, newname, namelen);
2194 namebuf[namelen] = '\0';
2195
2196 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2197 typeinfo.defined != 0 ? &typeinfo : NULL);
2198
2199 /* Insert name in all uppercase. */
2200 for (p = namebuf; *p; p++)
2201 *p = TOUPPER (*p);
2202
2203 if (strncmp (namebuf, newname, namelen))
2204 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2205 typeinfo.defined != 0 ? &typeinfo : NULL);
2206
2207 /* Insert name in all lowercase. */
2208 for (p = namebuf; *p; p++)
2209 *p = TOLOWER (*p);
2210
2211 if (strncmp (namebuf, newname, namelen))
2212 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2213 typeinfo.defined != 0 ? &typeinfo : NULL);
2214
2215 return 1;
2216 }
2217
2218 /* Should never be called, as .req goes between the alias and the
2219 register name, not at the beginning of the line. */
2220 static void
2221 s_req (int a ATTRIBUTE_UNUSED)
2222 {
2223 as_bad (_("invalid syntax for .req directive"));
2224 }
2225
2226 static void
2227 s_dn (int a ATTRIBUTE_UNUSED)
2228 {
2229 as_bad (_("invalid syntax for .dn directive"));
2230 }
2231
2232 static void
2233 s_qn (int a ATTRIBUTE_UNUSED)
2234 {
2235 as_bad (_("invalid syntax for .qn directive"));
2236 }
2237
2238 /* The .unreq directive deletes an alias which was previously defined
2239 by .req. For example:
2240
2241 my_alias .req r11
2242 .unreq my_alias */
2243
2244 static void
2245 s_unreq (int a ATTRIBUTE_UNUSED)
2246 {
2247 char * name;
2248 char saved_char;
2249
2250 name = input_line_pointer;
2251
2252 while (*input_line_pointer != 0
2253 && *input_line_pointer != ' '
2254 && *input_line_pointer != '\n')
2255 ++input_line_pointer;
2256
2257 saved_char = *input_line_pointer;
2258 *input_line_pointer = 0;
2259
2260 if (!*name)
2261 as_bad (_("invalid syntax for .unreq directive"));
2262 else
2263 {
2264 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2265
2266 if (!reg)
2267 as_bad (_("unknown register alias '%s'"), name);
2268 else if (reg->builtin)
2269 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2270 name);
2271 else
2272 {
2273 hash_delete (arm_reg_hsh, name);
2274 free ((char *) reg->name);
2275 if (reg->neon)
2276 free (reg->neon);
2277 free (reg);
2278 }
2279 }
2280
2281 *input_line_pointer = saved_char;
2282 demand_empty_rest_of_line ();
2283 }
2284
2285 /* Directives: Instruction set selection. */
2286
2287 #ifdef OBJ_ELF
2288 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2289 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2290 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2291 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
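/* For example (illustrative), a section holding ARM code, then a literal
   pool, then Thumb code will get $a, $d and $t mapping symbols at the
   respective transition points.  */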
2292
2293 static enum mstate mapstate = MAP_UNDEFINED;
2294
2295 void
2296 mapping_state (enum mstate state)
2297 {
2298 symbolS * symbolP;
2299 const char * symname;
2300 int type;
2301
2302 if (mapstate == state)
2303 /* The mapping symbol has already been emitted.
2304 There is nothing else to do. */
2305 return;
2306
2307 mapstate = state;
2308
2309 switch (state)
2310 {
2311 case MAP_DATA:
2312 symname = "$d";
2313 type = BSF_NO_FLAGS;
2314 break;
2315 case MAP_ARM:
2316 symname = "$a";
2317 type = BSF_NO_FLAGS;
2318 break;
2319 case MAP_THUMB:
2320 symname = "$t";
2321 type = BSF_NO_FLAGS;
2322 break;
2323 case MAP_UNDEFINED:
2324 return;
2325 default:
2326 abort ();
2327 }
2328
2329 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2330
2331 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2332 symbol_table_insert (symbolP);
2333 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2334
2335 switch (state)
2336 {
2337 case MAP_ARM:
2338 THUMB_SET_FUNC (symbolP, 0);
2339 ARM_SET_THUMB (symbolP, 0);
2340 ARM_SET_INTERWORK (symbolP, support_interwork);
2341 break;
2342
2343 case MAP_THUMB:
2344 THUMB_SET_FUNC (symbolP, 1);
2345 ARM_SET_THUMB (symbolP, 1);
2346 ARM_SET_INTERWORK (symbolP, support_interwork);
2347 break;
2348
2349 case MAP_DATA:
2350 default:
2351 return;
2352 }
2353 }
2354 #else
2355 #define mapping_state(x) /* nothing */
2356 #endif
2357
2358 /* Find the real, Thumb encoded start of a Thumb function. */
2359
2360 static symbolS *
2361 find_real_start (symbolS * symbolP)
2362 {
2363 char * real_start;
2364 const char * name = S_GET_NAME (symbolP);
2365 symbolS * new_target;
2366
2367 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2368 #define STUB_NAME ".real_start_of"
2369
2370 if (name == NULL)
2371 abort ();
2372
2373 /* The compiler may generate BL instructions to local labels because
2374 it needs to perform a branch to a far away location. These labels
2375 do not have a corresponding ".real_start_of" label. We check
2376 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2377 the ".real_start_of" convention for nonlocal branches. */
2378 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2379 return symbolP;
2380
2381 real_start = ACONCAT ((STUB_NAME, name, NULL));
2382 new_target = symbol_find (real_start);
2383
2384 if (new_target == NULL)
2385 {
2386 as_warn (_("Failed to find real start of function: %s\n"), name);
2387 new_target = symbolP;
2388 }
2389
2390 return new_target;
2391 }
2392
2393 static void
2394 opcode_select (int width)
2395 {
2396 switch (width)
2397 {
2398 case 16:
2399 if (! thumb_mode)
2400 {
2401 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2402 as_bad (_("selected processor does not support THUMB opcodes"));
2403
2404 thumb_mode = 1;
2405 /* No need to force the alignment, since we will have been
2406 coming from ARM mode, which is word-aligned. */
2407 record_alignment (now_seg, 1);
2408 }
2409 mapping_state (MAP_THUMB);
2410 break;
2411
2412 case 32:
2413 if (thumb_mode)
2414 {
2415 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2416 as_bad (_("selected processor does not support ARM opcodes"));
2417
2418 thumb_mode = 0;
2419
2420 if (!need_pass_2)
2421 frag_align (2, 0, 0);
2422
2423 record_alignment (now_seg, 1);
2424 }
2425 mapping_state (MAP_ARM);
2426 break;
2427
2428 default:
2429 as_bad (_("invalid instruction size selected (%d)"), width);
2430 }
2431 }
2432
2433 static void
2434 s_arm (int ignore ATTRIBUTE_UNUSED)
2435 {
2436 opcode_select (32);
2437 demand_empty_rest_of_line ();
2438 }
2439
2440 static void
2441 s_thumb (int ignore ATTRIBUTE_UNUSED)
2442 {
2443 opcode_select (16);
2444 demand_empty_rest_of_line ();
2445 }
2446
2447 static void
2448 s_code (int unused ATTRIBUTE_UNUSED)
2449 {
2450 int temp;
2451
2452 temp = get_absolute_expression ();
2453 switch (temp)
2454 {
2455 case 16:
2456 case 32:
2457 opcode_select (temp);
2458 break;
2459
2460 default:
2461 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2462 }
2463 }
2464
2465 static void
2466 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2467 {
2468 /* If we are not already in thumb mode, go into it, EVEN if
2469 the target processor does not support thumb instructions.
2470 This is used by gcc/config/arm/lib1funcs.asm, for example,
2471 to compile interworking support functions even if the
2472 target processor does not support interworking. */
2473 if (! thumb_mode)
2474 {
2475 thumb_mode = 2;
2476 record_alignment (now_seg, 1);
2477 }
2478
2479 demand_empty_rest_of_line ();
2480 }
2481
2482 static void
2483 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2484 {
2485 s_thumb (0);
2486
2487 /* The following label is the name/address of the start of a Thumb function.
2488 We need to know this for the interworking support. */
2489 label_is_thumb_function_name = TRUE;
2490 }
2491
2492 /* Perform a .set directive, but also mark the alias as
2493 being a thumb function. */
2494
2495 static void
2496 s_thumb_set (int equiv)
2497 {
2498 /* XXX the following is a duplicate of the code for s_set() in read.c.
2499 We cannot just call that code as we need to get at the symbol that
2500 is created. */
2501 char * name;
2502 char delim;
2503 char * end_name;
2504 symbolS * symbolP;
2505
2506 /* Especial apologies for the random logic:
2507 This just grew, and could be parsed much more simply!
2508 Dean - in haste. */
2509 name = input_line_pointer;
2510 delim = get_symbol_end ();
2511 end_name = input_line_pointer;
2512 *end_name = delim;
2513
2514 if (*input_line_pointer != ',')
2515 {
2516 *end_name = 0;
2517 as_bad (_("expected comma after name \"%s\""), name);
2518 *end_name = delim;
2519 ignore_rest_of_line ();
2520 return;
2521 }
2522
2523 input_line_pointer++;
2524 *end_name = 0;
2525
2526 if (name[0] == '.' && name[1] == '\0')
2527 {
2528 /* XXX - this should not happen to .thumb_set. */
2529 abort ();
2530 }
2531
2532 if ((symbolP = symbol_find (name)) == NULL
2533 && (symbolP = md_undefined_symbol (name)) == NULL)
2534 {
2535 #ifndef NO_LISTING
2536 /* When doing symbol listings, play games with dummy fragments living
2537 outside the normal fragment chain to record the file and line info
2538 for this symbol. */
2539 if (listing & LISTING_SYMBOLS)
2540 {
2541 extern struct list_info_struct * listing_tail;
2542 fragS * dummy_frag = xmalloc (sizeof (fragS));
2543
2544 memset (dummy_frag, 0, sizeof (fragS));
2545 dummy_frag->fr_type = rs_fill;
2546 dummy_frag->line = listing_tail;
2547 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2548 dummy_frag->fr_symbol = symbolP;
2549 }
2550 else
2551 #endif
2552 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2553
2554 #ifdef OBJ_COFF
2555 /* "set" symbols are local unless otherwise specified. */
2556 SF_SET_LOCAL (symbolP);
2557 #endif /* OBJ_COFF */
2558 } /* Make a new symbol. */
2559
2560 symbol_table_insert (symbolP);
2561
2562 * end_name = delim;
2563
2564 if (equiv
2565 && S_IS_DEFINED (symbolP)
2566 && S_GET_SEGMENT (symbolP) != reg_section)
2567 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2568
2569 pseudo_set (symbolP);
2570
2571 demand_empty_rest_of_line ();
2572
2573 /* XXX Now we come to the Thumb specific bit of code. */
2574
2575 THUMB_SET_FUNC (symbolP, 1);
2576 ARM_SET_THUMB (symbolP, 1);
2577 #if defined OBJ_ELF || defined OBJ_COFF
2578 ARM_SET_INTERWORK (symbolP, support_interwork);
2579 #endif
2580 }
2581
2582 /* Directives: Mode selection. */
2583
2584 /* .syntax [unified|divided] - choose the new unified syntax
2585 (same for Arm and Thumb encoding, modulo slight differences in what
2586 can be represented) or the old divergent syntax for each mode. */
2587 static void
2588 s_syntax (int unused ATTRIBUTE_UNUSED)
2589 {
2590 char *name, delim;
2591
2592 name = input_line_pointer;
2593 delim = get_symbol_end ();
2594
2595 if (!strcasecmp (name, "unified"))
2596 unified_syntax = TRUE;
2597 else if (!strcasecmp (name, "divided"))
2598 unified_syntax = FALSE;
2599 else
2600 {
2601 as_bad (_("unrecognized syntax mode \"%s\""), name);
2602 return;
2603 }
2604 *input_line_pointer = delim;
2605 demand_empty_rest_of_line ();
2606 }
2607
2608 /* Directives: sectioning and alignment. */
2609
2610 /* Same as s_align_ptwo but align 0 => align 2. */
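/* For example, ".align 3" here requests 2^3 = 8 byte alignment, while
   ".align 0" is treated as ".align 2" (4 byte alignment).  */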
2611
2612 static void
2613 s_align (int unused ATTRIBUTE_UNUSED)
2614 {
2615 int temp;
2616 long temp_fill;
2617 long max_alignment = 15;
2618
2619 temp = get_absolute_expression ();
2620 if (temp > max_alignment)
2621 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2622 else if (temp < 0)
2623 {
2624 as_bad (_("alignment negative. 0 assumed."));
2625 temp = 0;
2626 }
2627
2628 if (*input_line_pointer == ',')
2629 {
2630 input_line_pointer++;
2631 temp_fill = get_absolute_expression ();
2632 }
2633 else
2634 temp_fill = 0;
2635
2636 if (!temp)
2637 temp = 2;
2638
2639 /* Only make a frag if we HAVE to. */
2640 if (temp && !need_pass_2)
2641 frag_align (temp, (int) temp_fill, 0);
2642 demand_empty_rest_of_line ();
2643
2644 record_alignment (now_seg, temp);
2645 }
2646
2647 static void
2648 s_bss (int ignore ATTRIBUTE_UNUSED)
2649 {
2650 /* We don't support putting frags in the BSS segment; we fake it by
2651 marking in_bss, then looking at s_skip for clues. */
2652 subseg_set (bss_section, 0);
2653 demand_empty_rest_of_line ();
2654 mapping_state (MAP_DATA);
2655 }
2656
2657 static void
2658 s_even (int ignore ATTRIBUTE_UNUSED)
2659 {
2660 /* Never make a frag if we expect an extra pass. */
2661 if (!need_pass_2)
2662 frag_align (1, 0, 0);
2663
2664 record_alignment (now_seg, 1);
2665
2666 demand_empty_rest_of_line ();
2667 }
2668
2669 /* Directives: Literal pools. */
2670
2671 static literal_pool *
2672 find_literal_pool (void)
2673 {
2674 literal_pool * pool;
2675
2676 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2677 {
2678 if (pool->section == now_seg
2679 && pool->sub_section == now_subseg)
2680 break;
2681 }
2682
2683 return pool;
2684 }
2685
2686 static literal_pool *
2687 find_or_make_literal_pool (void)
2688 {
2689 /* Next literal pool ID number. */
2690 static unsigned int latest_pool_num = 1;
2691 literal_pool * pool;
2692
2693 pool = find_literal_pool ();
2694
2695 if (pool == NULL)
2696 {
2697 /* Create a new pool. */
2698 pool = xmalloc (sizeof (* pool));
2699 if (! pool)
2700 return NULL;
2701
2702 pool->next_free_entry = 0;
2703 pool->section = now_seg;
2704 pool->sub_section = now_subseg;
2705 pool->next = list_of_pools;
2706 pool->symbol = NULL;
2707
2708 /* Add it to the list. */
2709 list_of_pools = pool;
2710 }
2711
2712 /* New pools, and emptied pools, will have a NULL symbol. */
2713 if (pool->symbol == NULL)
2714 {
2715 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2716 (valueT) 0, &zero_address_frag);
2717 pool->id = latest_pool_num ++;
2718 }
2719
2720 /* Done. */
2721 return pool;
2722 }
2723
2724 /* Add the literal in the global 'inst'
2725 structure to the relevant literal pool. */
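/* For example (illustrative), "ldr r0, =0x12345678" places 0x12345678 in
   the pool for the current section/subsection and rewrites the operand as
   a reference to the pool symbol; the pool itself is emitted later by
   ".ltorg" (or ".pool").  */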
2726
2727 static int
2728 add_to_lit_pool (void)
2729 {
2730 literal_pool * pool;
2731 unsigned int entry;
2732
2733 pool = find_or_make_literal_pool ();
2734
2735 /* Check if this literal value is already in the pool. */
2736 for (entry = 0; entry < pool->next_free_entry; entry ++)
2737 {
2738 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2739 && (inst.reloc.exp.X_op == O_constant)
2740 && (pool->literals[entry].X_add_number
2741 == inst.reloc.exp.X_add_number)
2742 && (pool->literals[entry].X_unsigned
2743 == inst.reloc.exp.X_unsigned))
2744 break;
2745
2746 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2747 && (inst.reloc.exp.X_op == O_symbol)
2748 && (pool->literals[entry].X_add_number
2749 == inst.reloc.exp.X_add_number)
2750 && (pool->literals[entry].X_add_symbol
2751 == inst.reloc.exp.X_add_symbol)
2752 && (pool->literals[entry].X_op_symbol
2753 == inst.reloc.exp.X_op_symbol))
2754 break;
2755 }
2756
2757 /* Do we need to create a new entry? */
2758 if (entry == pool->next_free_entry)
2759 {
2760 if (entry >= MAX_LITERAL_POOL_SIZE)
2761 {
2762 inst.error = _("literal pool overflow");
2763 return FAIL;
2764 }
2765
2766 pool->literals[entry] = inst.reloc.exp;
2767 pool->next_free_entry += 1;
2768 }
2769
2770 inst.reloc.exp.X_op = O_symbol;
2771 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2772 inst.reloc.exp.X_add_symbol = pool->symbol;
2773
2774 return SUCCESS;
2775 }
2776
2777 /* Can't use symbol_new here, so have to create a symbol and then at
2778 a later date assign it a value. That's what these functions do. */
2779
2780 static void
2781 symbol_locate (symbolS * symbolP,
2782 const char * name, /* It is copied, the caller can modify. */
2783 segT segment, /* Segment identifier (SEG_<something>). */
2784 valueT valu, /* Symbol value. */
2785 fragS * frag) /* Associated fragment. */
2786 {
2787 unsigned int name_length;
2788 char * preserved_copy_of_name;
2789
2790 name_length = strlen (name) + 1; /* +1 for \0. */
2791 obstack_grow (&notes, name, name_length);
2792 preserved_copy_of_name = obstack_finish (&notes);
2793
2794 #ifdef tc_canonicalize_symbol_name
2795 preserved_copy_of_name =
2796 tc_canonicalize_symbol_name (preserved_copy_of_name);
2797 #endif
2798
2799 S_SET_NAME (symbolP, preserved_copy_of_name);
2800
2801 S_SET_SEGMENT (symbolP, segment);
2802 S_SET_VALUE (symbolP, valu);
2803 symbol_clear_list_pointers (symbolP);
2804
2805 symbol_set_frag (symbolP, frag);
2806
2807 /* Link to end of symbol chain. */
2808 {
2809 extern int symbol_table_frozen;
2810
2811 if (symbol_table_frozen)
2812 abort ();
2813 }
2814
2815 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2816
2817 obj_symbol_new_hook (symbolP);
2818
2819 #ifdef tc_symbol_new_hook
2820 tc_symbol_new_hook (symbolP);
2821 #endif
2822
2823 #ifdef DEBUG_SYMS
2824 verify_symbol_chain (symbol_rootP, symbol_lastP);
2825 #endif /* DEBUG_SYMS */
2826 }
2827
2828
2829 static void
2830 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2831 {
2832 unsigned int entry;
2833 literal_pool * pool;
2834 char sym_name[20];
2835
2836 pool = find_literal_pool ();
2837 if (pool == NULL
2838 || pool->symbol == NULL
2839 || pool->next_free_entry == 0)
2840 return;
2841
2842 mapping_state (MAP_DATA);
2843
2844 /* Align the pool, as we use word accesses.
2845 Only make a frag if we have to. */
2846 if (!need_pass_2)
2847 frag_align (2, 0, 0);
2848
2849 record_alignment (now_seg, 2);
2850
2851 sprintf (sym_name, "$$lit_\002%x", pool->id);
2852
2853 symbol_locate (pool->symbol, sym_name, now_seg,
2854 (valueT) frag_now_fix (), frag_now);
2855 symbol_table_insert (pool->symbol);
2856
2857 ARM_SET_THUMB (pool->symbol, thumb_mode);
2858
2859 #if defined OBJ_COFF || defined OBJ_ELF
2860 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2861 #endif
2862
2863 for (entry = 0; entry < pool->next_free_entry; entry ++)
2864 /* First output the expression in the instruction to the pool. */
2865 emit_expr (&(pool->literals[entry]), 4); /* .word */
2866
2867 /* Mark the pool as empty. */
2868 pool->next_free_entry = 0;
2869 pool->symbol = NULL;
2870 }
2871
2872 #ifdef OBJ_ELF
2873 /* Forward declarations for functions below, in the MD interface
2874 section. */
2875 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2876 static valueT create_unwind_entry (int);
2877 static void start_unwind_section (const segT, int);
2878 static void add_unwind_opcode (valueT, int);
2879 static void flush_pending_unwind (void);
2880
2881 /* Directives: Data. */
2882
2883 static void
2884 s_arm_elf_cons (int nbytes)
2885 {
2886 expressionS exp;
2887
2888 #ifdef md_flush_pending_output
2889 md_flush_pending_output ();
2890 #endif
2891
2892 if (is_it_end_of_statement ())
2893 {
2894 demand_empty_rest_of_line ();
2895 return;
2896 }
2897
2898 #ifdef md_cons_align
2899 md_cons_align (nbytes);
2900 #endif
2901
2902 mapping_state (MAP_DATA);
2903 do
2904 {
2905 int reloc;
2906 char *base = input_line_pointer;
2907
2908 expression (& exp);
2909
2910 if (exp.X_op != O_symbol)
2911 emit_expr (&exp, (unsigned int) nbytes);
2912 else
2913 {
2914 char *before_reloc = input_line_pointer;
2915 reloc = parse_reloc (&input_line_pointer);
2916 if (reloc == -1)
2917 {
2918 as_bad (_("unrecognized relocation suffix"));
2919 ignore_rest_of_line ();
2920 return;
2921 }
2922 else if (reloc == BFD_RELOC_UNUSED)
2923 emit_expr (&exp, (unsigned int) nbytes);
2924 else
2925 {
2926 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2927 int size = bfd_get_reloc_size (howto);
2928
2929 if (reloc == BFD_RELOC_ARM_PLT32)
2930 {
2931 as_bad (_("(plt) is only valid on branch targets"));
2932 reloc = BFD_RELOC_UNUSED;
2933 size = 0;
2934 }
2935
2936 if (size > nbytes)
2937 as_bad (_("%s relocations do not fit in %d bytes"),
2938 howto->name, nbytes);
2939 else
2940 {
2941 /* We've parsed an expression stopping at O_symbol.
2942 But there may be more expression left now that we
2943 have parsed the relocation marker. Parse it again.
2944 XXX Surely there is a cleaner way to do this. */
2945 char *p = input_line_pointer;
2946 int offset;
2947 char *save_buf = alloca (input_line_pointer - base);
2948 memcpy (save_buf, base, input_line_pointer - base);
2949 memmove (base + (input_line_pointer - before_reloc),
2950 base, before_reloc - base);
2951
2952 input_line_pointer = base + (input_line_pointer-before_reloc);
2953 expression (&exp);
2954 memcpy (base, save_buf, p - base);
2955
2956 offset = nbytes - size;
2957 p = frag_more ((int) nbytes);
2958 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2959 size, &exp, 0, reloc);
2960 }
2961 }
2962 }
2963 }
2964 while (*input_line_pointer++ == ',');
2965
2966 /* Put terminator back into stream. */
2967 input_line_pointer --;
2968 demand_empty_rest_of_line ();
2969 }
2970
2971
2972 /* Parse a .rel31 directive. */
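/* From the code below, the expected form is:
       .rel31 <0|1>, <expression>
   which emits a 32-bit word whose top bit is the given value and whose low
   31 bits carry a self-relative (BFD_RELOC_ARM_PREL31) reference to the
   expression.  */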
2973
2974 static void
2975 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2976 {
2977 expressionS exp;
2978 char *p;
2979 valueT highbit;
2980
2981 highbit = 0;
2982 if (*input_line_pointer == '1')
2983 highbit = 0x80000000;
2984 else if (*input_line_pointer != '0')
2985 as_bad (_("expected 0 or 1"));
2986
2987 input_line_pointer++;
2988 if (*input_line_pointer != ',')
2989 as_bad (_("missing comma"));
2990 input_line_pointer++;
2991
2992 #ifdef md_flush_pending_output
2993 md_flush_pending_output ();
2994 #endif
2995
2996 #ifdef md_cons_align
2997 md_cons_align (4);
2998 #endif
2999
3000 mapping_state (MAP_DATA);
3001
3002 expression (&exp);
3003
3004 p = frag_more (4);
3005 md_number_to_chars (p, highbit, 4);
3006 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3007 BFD_RELOC_ARM_PREL31);
3008
3009 demand_empty_rest_of_line ();
3010 }
3011
3012 /* Directives: AEABI stack-unwind tables. */
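/* A typical (illustrative) use of these directives in assembly source:
       .fnstart
       .save {r4, r5, lr}
       @ ... function body ...
       .fnend
   with .handlerdata, .personality, .setfp, .pad, etc. covering the less
   common cases.  */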
3013
3014 /* Parse an unwind_fnstart directive. Simply records the current location. */
3015
3016 static void
3017 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3018 {
3019 demand_empty_rest_of_line ();
3020 /* Mark the start of the function. */
3021 unwind.proc_start = expr_build_dot ();
3022
3023 /* Reset the rest of the unwind info. */
3024 unwind.opcode_count = 0;
3025 unwind.table_entry = NULL;
3026 unwind.personality_routine = NULL;
3027 unwind.personality_index = -1;
3028 unwind.frame_size = 0;
3029 unwind.fp_offset = 0;
3030 unwind.fp_reg = 13;
3031 unwind.fp_used = 0;
3032 unwind.sp_restored = 0;
3033 }
3034
3035
3036 /* Parse a handlerdata directive. Creates the exception handling table entry
3037 for the function. */
3038
3039 static void
3040 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3041 {
3042 demand_empty_rest_of_line ();
3043 if (unwind.table_entry)
3044 as_bad (_("duplicate .handlerdata directive"));
3045
3046 create_unwind_entry (1);
3047 }
3048
3049 /* Parse an unwind_fnend directive. Generates the index table entry. */
3050
3051 static void
3052 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3053 {
3054 long where;
3055 char *ptr;
3056 valueT val;
3057
3058 demand_empty_rest_of_line ();
3059
3060 /* Add eh table entry. */
3061 if (unwind.table_entry == NULL)
3062 val = create_unwind_entry (0);
3063 else
3064 val = 0;
3065
3066 /* Add index table entry. This is two words. */
3067 start_unwind_section (unwind.saved_seg, 1);
3068 frag_align (2, 0, 0);
3069 record_alignment (now_seg, 2);
3070
3071 ptr = frag_more (8);
3072 where = frag_now_fix () - 8;
3073
3074 /* Self relative offset of the function start. */
3075 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3076 BFD_RELOC_ARM_PREL31);
3077
3078 /* Indicate dependency on EHABI-defined personality routines to the
3079 linker, if it hasn't been done already. */
3080 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3081 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3082 {
3083 static const char *const name[] = {
3084 "__aeabi_unwind_cpp_pr0",
3085 "__aeabi_unwind_cpp_pr1",
3086 "__aeabi_unwind_cpp_pr2"
3087 };
3088 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3089 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3090 marked_pr_dependency |= 1 << unwind.personality_index;
3091 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3092 = marked_pr_dependency;
3093 }
3094
3095 if (val)
3096 /* Inline exception table entry. */
3097 md_number_to_chars (ptr + 4, val, 4);
3098 else
3099 /* Self relative offset of the table entry. */
3100 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3101 BFD_RELOC_ARM_PREL31);
3102
3103 /* Restore the original section. */
3104 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3105 }
3106
3107
3108 /* Parse an unwind_cantunwind directive. */
3109
3110 static void
3111 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3112 {
3113 demand_empty_rest_of_line ();
3114 if (unwind.personality_routine || unwind.personality_index != -1)
3115 as_bad (_("personality routine specified for cantunwind frame"));
3116
3117 unwind.personality_index = -2;
3118 }
3119
3120
3121 /* Parse a personalityindex directive. */
3122
3123 static void
3124 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3125 {
3126 expressionS exp;
3127
3128 if (unwind.personality_routine || unwind.personality_index != -1)
3129 as_bad (_("duplicate .personalityindex directive"));
3130
3131 expression (&exp);
3132
3133 if (exp.X_op != O_constant
3134 || exp.X_add_number < 0 || exp.X_add_number > 15)
3135 {
3136 as_bad (_("bad personality routine number"));
3137 ignore_rest_of_line ();
3138 return;
3139 }
3140
3141 unwind.personality_index = exp.X_add_number;
3142
3143 demand_empty_rest_of_line ();
3144 }
3145
3146
3147 /* Parse a personality directive. */
3148
3149 static void
3150 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3151 {
3152 char *name, *p, c;
3153
3154 if (unwind.personality_routine || unwind.personality_index != -1)
3155 as_bad (_("duplicate .personality directive"));
3156
3157 name = input_line_pointer;
3158 c = get_symbol_end ();
3159 p = input_line_pointer;
3160 unwind.personality_routine = symbol_find_or_make (name);
3161 *p = c;
3162 demand_empty_rest_of_line ();
3163 }
3164
3165
3166 /* Parse a directive saving core registers. */
3167
3168 static void
3169 s_arm_unwind_save_core (void)
3170 {
3171 valueT op;
3172 long range;
3173 int n;
3174
3175 range = parse_reg_list (&input_line_pointer);
3176 if (range == FAIL)
3177 {
3178 as_bad (_("expected register list"));
3179 ignore_rest_of_line ();
3180 return;
3181 }
3182
3183 demand_empty_rest_of_line ();
3184
3185 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3186 into .unwind_save {..., sp...}. We aren't bothered about the value of
3187 ip because it is clobbered by calls. */
3188 if (unwind.sp_restored && unwind.fp_reg == 12
3189 && (range & 0x3000) == 0x1000)
3190 {
3191 unwind.opcode_count--;
3192 unwind.sp_restored = 0;
3193 range = (range | 0x2000) & ~0x1000;
3194 unwind.pending_offset = 0;
3195 }
3196
3197 /* Pop r4-r15. */
3198 if (range & 0xfff0)
3199 {
3200 /* See if we can use the short opcodes. These pop a block of up to 8
3201 registers starting with r4, plus maybe r14. */
3202 for (n = 0; n < 8; n++)
3203 {
3204 /* Break at the first non-saved register. */
3205 if ((range & (1 << (n + 4))) == 0)
3206 break;
3207 }
3208 /* See if there are any other bits set. */
3209 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3210 {
3211 /* Use the long form. */
3212 op = 0x8000 | ((range >> 4) & 0xfff);
3213 add_unwind_opcode (op, 2);
3214 }
3215 else
3216 {
3217 /* Use the short form. */
3218 if (range & 0x4000)
3219 op = 0xa8; /* Pop r14. */
3220 else
3221 op = 0xa0; /* Do not pop r14. */
3222 op |= (n - 1);
3223 add_unwind_opcode (op, 1);
3224 }
3225 }
3226
3227 /* Pop r0-r3. */
3228 if (range & 0xf)
3229 {
3230 op = 0xb100 | (range & 0xf);
3231 add_unwind_opcode (op, 2);
3232 }
3233
3234 /* Record the number of bytes pushed. */
3235 for (n = 0; n < 16; n++)
3236 {
3237 if (range & (1 << n))
3238 unwind.frame_size += 4;
3239 }
3240 }
3241
3242
3243 /* Parse a directive saving FPA registers. */
3244
3245 static void
3246 s_arm_unwind_save_fpa (int reg)
3247 {
3248 expressionS exp;
3249 int num_regs;
3250 valueT op;
3251
3252 /* Get Number of registers to transfer. */
3253 if (skip_past_comma (&input_line_pointer) != FAIL)
3254 expression (&exp);
3255 else
3256 exp.X_op = O_illegal;
3257
3258 if (exp.X_op != O_constant)
3259 {
3260 as_bad (_("expected , <constant>"));
3261 ignore_rest_of_line ();
3262 return;
3263 }
3264
3265 num_regs = exp.X_add_number;
3266
3267 if (num_regs < 1 || num_regs > 4)
3268 {
3269 as_bad (_("number of registers must be in the range [1:4]"));
3270 ignore_rest_of_line ();
3271 return;
3272 }
3273
3274 demand_empty_rest_of_line ();
3275
3276 if (reg == 4)
3277 {
3278 /* Short form. */
3279 op = 0xb4 | (num_regs - 1);
3280 add_unwind_opcode (op, 1);
3281 }
3282 else
3283 {
3284 /* Long form. */
3285 op = 0xc800 | (reg << 4) | (num_regs - 1);
3286 add_unwind_opcode (op, 2);
3287 }
3288 unwind.frame_size += num_regs * 12;
3289 }
3290
3291
3292 /* Parse a directive saving VFP registers for ARMv6 and above. */
3293
3294 static void
3295 s_arm_unwind_save_vfp_armv6 (void)
3296 {
3297 int count;
3298 unsigned int start;
3299 valueT op;
3300 int num_vfpv3_regs = 0;
3301 int num_regs_below_16;
3302
3303 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3304 if (count == FAIL)
3305 {
3306 as_bad (_("expected register list"));
3307 ignore_rest_of_line ();
3308 return;
3309 }
3310
3311 demand_empty_rest_of_line ();
3312
3313 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3314 than FSTMX/FLDMX-style ones). */
3315
3316 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3317 if (start >= 16)
3318 num_vfpv3_regs = count;
3319 else if (start + count > 16)
3320 num_vfpv3_regs = start + count - 16;
3321
3322 if (num_vfpv3_regs > 0)
3323 {
3324 int start_offset = start > 16 ? start - 16 : 0;
3325 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3326 add_unwind_opcode (op, 2);
3327 }
3328
3329 /* Generate opcode for registers numbered in the range 0 .. 15. */
3330 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3331 assert (num_regs_below_16 + num_vfpv3_regs == count);
3332 if (num_regs_below_16 > 0)
3333 {
3334 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3335 add_unwind_opcode (op, 2);
3336 }
3337
3338 unwind.frame_size += count * 8;
3339 }
3340
3341
3342 /* Parse a directive saving VFP registers for pre-ARMv6. */
3343
3344 static void
3345 s_arm_unwind_save_vfp (void)
3346 {
3347 int count;
3348 unsigned int reg;
3349 valueT op;
3350
3351 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3352 if (count == FAIL)
3353 {
3354 as_bad (_("expected register list"));
3355 ignore_rest_of_line ();
3356 return;
3357 }
3358
3359 demand_empty_rest_of_line ();
3360
3361 if (reg == 8)
3362 {
3363 /* Short form. */
3364 op = 0xb8 | (count - 1);
3365 add_unwind_opcode (op, 1);
3366 }
3367 else
3368 {
3369 /* Long form. */
3370 op = 0xb300 | (reg << 4) | (count - 1);
3371 add_unwind_opcode (op, 2);
3372 }
3373 unwind.frame_size += count * 8 + 4;
3374 }
3375
3376
3377 /* Parse a directive saving iWMMXt data registers. */
3378
3379 static void
3380 s_arm_unwind_save_mmxwr (void)
3381 {
3382 int reg;
3383 int hi_reg;
3384 int i;
3385 unsigned mask = 0;
3386 valueT op;
3387
3388 if (*input_line_pointer == '{')
3389 input_line_pointer++;
3390
3391 do
3392 {
3393 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3394
3395 if (reg == FAIL)
3396 {
3397 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3398 goto error;
3399 }
3400
3401 if (mask >> reg)
3402 as_tsktsk (_("register list not in ascending order"));
3403 mask |= 1 << reg;
3404
3405 if (*input_line_pointer == '-')
3406 {
3407 input_line_pointer++;
3408 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3409 if (hi_reg == FAIL)
3410 {
3411 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3412 goto error;
3413 }
3414 else if (reg >= hi_reg)
3415 {
3416 as_bad (_("bad register range"));
3417 goto error;
3418 }
3419 for (; reg < hi_reg; reg++)
3420 mask |= 1 << reg;
3421 }
3422 }
3423 while (skip_past_comma (&input_line_pointer) != FAIL);
3424
3425 if (*input_line_pointer == '}')
3426 input_line_pointer++;
3427
3428 demand_empty_rest_of_line ();
3429
3430 /* Generate any deferred opcodes because we're going to be looking at
3431 the list. */
3432 flush_pending_unwind ();
3433
3434 for (i = 0; i < 16; i++)
3435 {
3436 if (mask & (1 << i))
3437 unwind.frame_size += 8;
3438 }
3439
3440 /* Attempt to combine with a previous opcode. We do this because gcc
3441 likes to output separate unwind directives for a single block of
3442 registers. */
3443 if (unwind.opcode_count > 0)
3444 {
3445 i = unwind.opcodes[unwind.opcode_count - 1];
3446 if ((i & 0xf8) == 0xc0)
3447 {
3448 i &= 7;
3449 /* Only merge if the blocks are contiguous. */
3450 if (i < 6)
3451 {
3452 if ((mask & 0xfe00) == (1 << 9))
3453 {
3454 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3455 unwind.opcode_count--;
3456 }
3457 }
3458 else if (i == 6 && unwind.opcode_count >= 2)
3459 {
3460 i = unwind.opcodes[unwind.opcode_count - 2];
3461 reg = i >> 4;
3462 i &= 0xf;
3463
3464 op = 0xffff << (reg - 1);
3465 if (reg > 0
3466 && ((mask & op) == (1u << (reg - 1))))
3467 {
3468 op = (1 << (reg + i + 1)) - 1;
3469 op &= ~((1 << reg) - 1);
3470 mask |= op;
3471 unwind.opcode_count -= 2;
3472 }
3473 }
3474 }
3475 }
3476
3477 hi_reg = 15;
3478 /* We want to generate opcodes in the order the registers have been
3479 saved, i.e. descending order. */
3480 for (reg = 15; reg >= -1; reg--)
3481 {
3482 /* Save registers in blocks. */
3483 if (reg < 0
3484 || !(mask & (1 << reg)))
3485 {
3486 /* We found an unsaved reg. Generate opcodes to save the
3487 preceding block. */
3488 if (reg != hi_reg)
3489 {
3490 if (reg == 9)
3491 {
3492 /* Short form. */
3493 op = 0xc0 | (hi_reg - 10);
3494 add_unwind_opcode (op, 1);
3495 }
3496 else
3497 {
3498 /* Long form. */
3499 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3500 add_unwind_opcode (op, 2);
3501 }
3502 }
3503 hi_reg = reg - 1;
3504 }
3505 }
3506
3507 return;
3508 error:
3509 ignore_rest_of_line ();
3510 }
3511
3512 static void
3513 s_arm_unwind_save_mmxwcg (void)
3514 {
3515 int reg;
3516 int hi_reg;
3517 unsigned mask = 0;
3518 valueT op;
3519
3520 if (*input_line_pointer == '{')
3521 input_line_pointer++;
3522
3523 do
3524 {
3525 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3526
3527 if (reg == FAIL)
3528 {
3529 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3530 goto error;
3531 }
3532
3533 reg -= 8;
3534 if (mask >> reg)
3535 as_tsktsk (_("register list not in ascending order"));
3536 mask |= 1 << reg;
3537
3538 if (*input_line_pointer == '-')
3539 {
3540 input_line_pointer++;
3541 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3542 if (hi_reg == FAIL)
3543 {
3544 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3545 goto error;
3546 }
3547 else if (reg >= hi_reg)
3548 {
3549 as_bad (_("bad register range"));
3550 goto error;
3551 }
3552 for (; reg < hi_reg; reg++)
3553 mask |= 1 << reg;
3554 }
3555 }
3556 while (skip_past_comma (&input_line_pointer) != FAIL);
3557
3558 if (*input_line_pointer == '}')
3559 input_line_pointer++;
3560
3561 demand_empty_rest_of_line ();
3562
3563 /* Generate any deferred opcodes because we're going to be looking at
3564 the list. */
3565 flush_pending_unwind ();
3566
3567 for (reg = 0; reg < 16; reg++)
3568 {
3569 if (mask & (1 << reg))
3570 unwind.frame_size += 4;
3571 }
3572 op = 0xc700 | mask;
3573 add_unwind_opcode (op, 2);
3574 return;
3575 error:
3576 ignore_rest_of_line ();
3577 }
3578
3579
3580 /* Parse an unwind_save directive.
3581 If the argument is non-zero, this is a .vsave directive. */
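/* For example (illustrative), ".save {r4-r7, lr}" is dispatched to
   s_arm_unwind_save_core, while ".vsave {d8-d11}" takes the ARMv6+ VFP
   path (s_arm_unwind_save_vfp_armv6).  */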
3582
3583 static void
3584 s_arm_unwind_save (int arch_v6)
3585 {
3586 char *peek;
3587 struct reg_entry *reg;
3588 bfd_boolean had_brace = FALSE;
3589
3590 /* Figure out what sort of save we have. */
3591 peek = input_line_pointer;
3592
3593 if (*peek == '{')
3594 {
3595 had_brace = TRUE;
3596 peek++;
3597 }
3598
3599 reg = arm_reg_parse_multi (&peek);
3600
3601 if (!reg)
3602 {
3603 as_bad (_("register expected"));
3604 ignore_rest_of_line ();
3605 return;
3606 }
3607
3608 switch (reg->type)
3609 {
3610 case REG_TYPE_FN:
3611 if (had_brace)
3612 {
3613 as_bad (_("FPA .unwind_save does not take a register list"));
3614 ignore_rest_of_line ();
3615 return;
3616 }
3617 s_arm_unwind_save_fpa (reg->number);
3618 return;
3619
3620 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3621 case REG_TYPE_VFD:
3622 if (arch_v6)
3623 s_arm_unwind_save_vfp_armv6 ();
3624 else
3625 s_arm_unwind_save_vfp ();
3626 return;
3627 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3628 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3629
3630 default:
3631 as_bad (_(".unwind_save does not support this kind of register"));
3632 ignore_rest_of_line ();
3633 }
3634 }
3635
3636
3637 /* Parse an unwind_movsp directive. */
3638
3639 static void
3640 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3641 {
3642 int reg;
3643 valueT op;
3644 int offset;
3645
3646 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3647 if (reg == FAIL)
3648 {
3649 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3650 ignore_rest_of_line ();
3651 return;
3652 }
3653
3654 /* Optional constant. */
3655 if (skip_past_comma (&input_line_pointer) != FAIL)
3656 {
3657 if (immediate_for_directive (&offset) == FAIL)
3658 return;
3659 }
3660 else
3661 offset = 0;
3662
3663 demand_empty_rest_of_line ();
3664
3665 if (reg == REG_SP || reg == REG_PC)
3666 {
3667 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3668 return;
3669 }
3670
3671 if (unwind.fp_reg != REG_SP)
3672 as_bad (_("unexpected .unwind_movsp directive"));
3673
3674 /* Generate opcode to restore the value. */
3675 op = 0x90 | reg;
3676 add_unwind_opcode (op, 1);
3677
3678 /* Record the information for later. */
3679 unwind.fp_reg = reg;
3680 unwind.fp_offset = unwind.frame_size - offset;
3681 unwind.sp_restored = 1;
3682 }
3683
3684 /* Parse an unwind_pad directive. */
3685
3686 static void
3687 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3688 {
3689 int offset;
3690
3691 if (immediate_for_directive (&offset) == FAIL)
3692 return;
3693
3694 if (offset & 3)
3695 {
3696 as_bad (_("stack increment must be multiple of 4"));
3697 ignore_rest_of_line ();
3698 return;
3699 }
3700
3701 /* Don't generate any opcodes, just record the details for later. */
3702 unwind.frame_size += offset;
3703 unwind.pending_offset += offset;
3704
3705 demand_empty_rest_of_line ();
3706 }
3707
3708 /* Parse an unwind_setfp directive. */
3709
3710 static void
3711 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3712 {
3713 int sp_reg;
3714 int fp_reg;
3715 int offset;
3716
3717 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3718 if (skip_past_comma (&input_line_pointer) == FAIL)
3719 sp_reg = FAIL;
3720 else
3721 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3722
3723 if (fp_reg == FAIL || sp_reg == FAIL)
3724 {
3725 as_bad (_("expected <reg>, <reg>"));
3726 ignore_rest_of_line ();
3727 return;
3728 }
3729
3730 /* Optional constant. */
3731 if (skip_past_comma (&input_line_pointer) != FAIL)
3732 {
3733 if (immediate_for_directive (&offset) == FAIL)
3734 return;
3735 }
3736 else
3737 offset = 0;
3738
3739 demand_empty_rest_of_line ();
3740
3741 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3742 {
3743 as_bad (_("register must be either sp or set by a previous "
3744 "unwind_movsp directive"));
3745 return;
3746 }
3747
3748 /* Don't generate any opcodes, just record the information for later. */
3749 unwind.fp_reg = fp_reg;
3750 unwind.fp_used = 1;
3751 if (sp_reg == 13)
3752 unwind.fp_offset = unwind.frame_size - offset;
3753 else
3754 unwind.fp_offset -= offset;
3755 }
3756
3757 /* Parse an unwind_raw directive. */
3758
3759 static void
3760 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3761 {
3762 expressionS exp;
3763 /* This is an arbitrary limit. */
3764 unsigned char op[16];
3765 int count;
3766
3767 expression (&exp);
3768 if (exp.X_op == O_constant
3769 && skip_past_comma (&input_line_pointer) != FAIL)
3770 {
3771 unwind.frame_size += exp.X_add_number;
3772 expression (&exp);
3773 }
3774 else
3775 exp.X_op = O_illegal;
3776
3777 if (exp.X_op != O_constant)
3778 {
3779 as_bad (_("expected <offset>, <opcode>"));
3780 ignore_rest_of_line ();
3781 return;
3782 }
3783
3784 count = 0;
3785
3786 /* Parse the opcode. */
3787 for (;;)
3788 {
3789 if (count >= 16)
3790 {
3791 as_bad (_("unwind opcode too long"));
3792 ignore_rest_of_line ();
return;
3793 }
3794 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3795 {
3796 as_bad (_("invalid unwind opcode"));
3797 ignore_rest_of_line ();
3798 return;
3799 }
3800 op[count++] = exp.X_add_number;
3801
3802 /* Parse the next byte. */
3803 if (skip_past_comma (&input_line_pointer) == FAIL)
3804 break;
3805
3806 expression (&exp);
3807 }
3808
3809 /* Add the opcode bytes in reverse order. */
3810 while (count--)
3811 add_unwind_opcode (op[count], 1);
3812
3813 demand_empty_rest_of_line ();
3814 }
3815
3816
3817 /* Parse a .eabi_attribute directive. */
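/* The general form is ".eabi_attribute <tag>, <value>", where the value is
   a numeric constant or, for the string-valued tags handled below, a quoted
   string (Tag_compatibility takes both).  Illustrative examples only:
       .eabi_attribute 10, 1
       .eabi_attribute 5, "some-cpu-name"  */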
3818
3819 static void
3820 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3821 {
3822 expressionS exp;
3823 bfd_boolean is_string;
3824 int tag;
3825 unsigned int i = 0;
3826 char *s = NULL;
3827 char saved_char;
3828
3829 expression (& exp);
3830 if (exp.X_op != O_constant)
3831 goto bad;
3832
3833 tag = exp.X_add_number;
3834 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3835 is_string = 1;
3836 else
3837 is_string = 0;
3838
3839 if (skip_past_comma (&input_line_pointer) == FAIL)
3840 goto bad;
3841 if (tag == 32 || !is_string)
3842 {
3843 expression (& exp);
3844 if (exp.X_op != O_constant)
3845 {
3846 as_bad (_("expected numeric constant"));
3847 ignore_rest_of_line ();
3848 return;
3849 }
3850 i = exp.X_add_number;
3851 }
3852 if (tag == Tag_compatibility
3853 && skip_past_comma (&input_line_pointer) == FAIL)
3854 {
3855 as_bad (_("expected comma"));
3856 ignore_rest_of_line ();
3857 return;
3858 }
3859 if (is_string)
3860 {
3861 skip_whitespace (input_line_pointer);
3862 if (*input_line_pointer != '"')
3863 goto bad_string;
3864 input_line_pointer++;
3865 s = input_line_pointer;
3866 while (*input_line_pointer && *input_line_pointer != '"')
3867 input_line_pointer++;
3868 if (*input_line_pointer != '"')
3869 goto bad_string;
3870 saved_char = *input_line_pointer;
3871 *input_line_pointer = 0;
3872 }
3873 else
3874 {
3875 s = NULL;
3876 saved_char = 0;
3877 }
3878
3879 if (tag == Tag_compatibility)
3880 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3881 else if (is_string)
3882 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3883 else
3884 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3885
3886 if (s)
3887 {
3888 *input_line_pointer = saved_char;
3889 input_line_pointer++;
3890 }
3891 demand_empty_rest_of_line ();
3892 return;
3893 bad_string:
3894 as_bad (_("bad string constant"));
3895 ignore_rest_of_line ();
3896 return;
3897 bad:
3898 as_bad (_("expected <tag> , <value>"));
3899 ignore_rest_of_line ();
3900 }
3901 #endif /* OBJ_ELF */
3902
3903 static void s_arm_arch (int);
3904 static void s_arm_object_arch (int);
3905 static void s_arm_cpu (int);
3906 static void s_arm_fpu (int);
3907
3908 #ifdef TE_PE
3909
3910 static void
3911 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3912 {
3913 expressionS exp;
3914
3915 do
3916 {
3917 expression (&exp);
3918 if (exp.X_op == O_symbol)
3919 exp.X_op = O_secrel;
3920
3921 emit_expr (&exp, 4);
3922 }
3923 while (*input_line_pointer++ == ',');
3924
3925 input_line_pointer--;
3926 demand_empty_rest_of_line ();
3927 }
3928 #endif /* TE_PE */
3929
3930 /* This table describes all the machine specific pseudo-ops the assembler
3931 has to support. The fields are:
3932 pseudo-op name without dot
3933 function to call to execute this pseudo-op
3934 Integer arg to pass to the function. */
3935
3936 const pseudo_typeS md_pseudo_table[] =
3937 {
3938 /* Never called because '.req' does not start a line. */
3939 { "req", s_req, 0 },
3940 /* Following two are likewise never called. */
3941 { "dn", s_dn, 0 },
3942 { "qn", s_qn, 0 },
3943 { "unreq", s_unreq, 0 },
3944 { "bss", s_bss, 0 },
3945 { "align", s_align, 0 },
3946 { "arm", s_arm, 0 },
3947 { "thumb", s_thumb, 0 },
3948 { "code", s_code, 0 },
3949 { "force_thumb", s_force_thumb, 0 },
3950 { "thumb_func", s_thumb_func, 0 },
3951 { "thumb_set", s_thumb_set, 0 },
3952 { "even", s_even, 0 },
3953 { "ltorg", s_ltorg, 0 },
3954 { "pool", s_ltorg, 0 },
3955 { "syntax", s_syntax, 0 },
3956 { "cpu", s_arm_cpu, 0 },
3957 { "arch", s_arm_arch, 0 },
3958 { "object_arch", s_arm_object_arch, 0 },
3959 { "fpu", s_arm_fpu, 0 },
3960 #ifdef OBJ_ELF
3961 { "word", s_arm_elf_cons, 4 },
3962 { "long", s_arm_elf_cons, 4 },
3963 { "rel31", s_arm_rel31, 0 },
3964 { "fnstart", s_arm_unwind_fnstart, 0 },
3965 { "fnend", s_arm_unwind_fnend, 0 },
3966 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3967 { "personality", s_arm_unwind_personality, 0 },
3968 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3969 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3970 { "save", s_arm_unwind_save, 0 },
3971 { "vsave", s_arm_unwind_save, 1 },
3972 { "movsp", s_arm_unwind_movsp, 0 },
3973 { "pad", s_arm_unwind_pad, 0 },
3974 { "setfp", s_arm_unwind_setfp, 0 },
3975 { "unwind_raw", s_arm_unwind_raw, 0 },
3976 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3977 #else
3978 { "word", cons, 4},
3979
3980 /* These are used for dwarf. */
3981 {"2byte", cons, 2},
3982 {"4byte", cons, 4},
3983 {"8byte", cons, 8},
3984 /* These are used for dwarf2. */
3985 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3986 { "loc", dwarf2_directive_loc, 0 },
3987 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3988 #endif
3989 { "extend", float_cons, 'x' },
3990 { "ldouble", float_cons, 'x' },
3991 { "packed", float_cons, 'p' },
3992 #ifdef TE_PE
3993 {"secrel32", pe_directive_secrel, 0},
3994 #endif
3995 { 0, 0, 0 }
3996 };
3997 \f
3998 /* Parser functions used exclusively in instruction operands. */
3999
4000 /* Generic immediate-value read function for use in insn parsing.
4001 STR points to the beginning of the immediate (the leading #);
4002 VAL receives the value; if the value is outside [MIN, MAX]
4003 issue an error. PREFIX_OPT is true if the immediate prefix is
4004 optional. */
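/* A hypothetical call such as parse_immediate (&str, &val, 0, 31, FALSE)
   would accept "#7" (storing 7 in *val and returning SUCCESS) and would
   reject "#40" (out of range) or any non-constant expression.  */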
4005
4006 static int
4007 parse_immediate (char **str, int *val, int min, int max,
4008 bfd_boolean prefix_opt)
4009 {
4010 expressionS exp;
4011 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4012 if (exp.X_op != O_constant)
4013 {
4014 inst.error = _("constant expression required");
4015 return FAIL;
4016 }
4017
4018 if (exp.X_add_number < min || exp.X_add_number > max)
4019 {
4020 inst.error = _("immediate value out of range");
4021 return FAIL;
4022 }
4023
4024 *val = exp.X_add_number;
4025 return SUCCESS;
4026 }
4027
4028 /* Less-generic immediate-value read function with the possibility of loading a
4029 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4030 instructions. Puts the result directly in inst.operands[i]. */
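/* For example (illustrative), "vmov.i64 d0, #0xff0000ff0000ffff" needs the
   full 64-bit value, which ends up with the low 32 bits in
   inst.operands[i].imm and the high 32 bits in inst.operands[i].reg, with
   regisimm set.  */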
4031
4032 static int
4033 parse_big_immediate (char **str, int i)
4034 {
4035 expressionS exp;
4036 char *ptr = *str;
4037
4038 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4039
4040 if (exp.X_op == O_constant)
4041 {
4042 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4043 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4044 O_constant. We have to be careful not to break compilation for
4045 32-bit X_add_number, though. */
4046 if ((exp.X_add_number & ~0xffffffffl) != 0)
4047 {
4048 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4049 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4050 inst.operands[i].regisimm = 1;
4051 }
4052 }
4053 else if (exp.X_op == O_big
4054 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4055 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4056 {
4057 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4058 /* Bignums have their least significant bits in
4059 generic_bignum[0]. Make sure we put 32 bits in imm and
4060 32 bits in reg, in a (hopefully) portable way. */
4061 assert (parts != 0);
4062 inst.operands[i].imm = 0;
4063 for (j = 0; j < parts; j++, idx++)
4064 inst.operands[i].imm |= generic_bignum[idx]
4065 << (LITTLENUM_NUMBER_OF_BITS * j);
4066 inst.operands[i].reg = 0;
4067 for (j = 0; j < parts; j++, idx++)
4068 inst.operands[i].reg |= generic_bignum[idx]
4069 << (LITTLENUM_NUMBER_OF_BITS * j);
4070 inst.operands[i].regisimm = 1;
4071 }
4072 else
4073 return FAIL;
4074
4075 *str = ptr;
4076
4077 return SUCCESS;
4078 }
4079
4080 /* Returns the pseudo-register number of an FPA immediate constant,
4081 or FAIL if there isn't a valid constant here. */
4082
4083 static int
4084 parse_fpa_immediate (char ** str)
4085 {
4086 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4087 char * save_in;
4088 expressionS exp;
4089 int i;
4090 int j;
4091
4092 /* First try to match exact strings; this is to guarantee
4093 that some formats will work even for cross assembly. */
4094
4095 for (i = 0; fp_const[i]; i++)
4096 {
4097 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4098 {
4099 char *start = *str;
4100
4101 *str += strlen (fp_const[i]);
4102 if (is_end_of_line[(unsigned char) **str])
4103 return i + 8;
4104 *str = start;
4105 }
4106 }
4107
4108 /* Just because we didn't get a match doesn't mean that the constant
4109 isn't valid, just that it is in a format that we don't
4110 automatically recognize. Try parsing it with the standard
4111 expression routines. */
4112
4113 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4114
4115 /* Look for a raw floating point number. */
4116 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4117 && is_end_of_line[(unsigned char) *save_in])
4118 {
4119 for (i = 0; i < NUM_FLOAT_VALS; i++)
4120 {
4121 for (j = 0; j < MAX_LITTLENUMS; j++)
4122 {
4123 if (words[j] != fp_values[i][j])
4124 break;
4125 }
4126
4127 if (j == MAX_LITTLENUMS)
4128 {
4129 *str = save_in;
4130 return i + 8;
4131 }
4132 }
4133 }
4134
4135 /* Try to parse a more complex expression; this will probably fail
4136 unless the code uses a floating point prefix (e.g. "0f"). */
4137 save_in = input_line_pointer;
4138 input_line_pointer = *str;
4139 if (expression (&exp) == absolute_section
4140 && exp.X_op == O_big
4141 && exp.X_add_number < 0)
4142 {
4143 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4144 Ditto for 15. */
4145 if (gen_to_words (words, 5, (long) 15) == 0)
4146 {
4147 for (i = 0; i < NUM_FLOAT_VALS; i++)
4148 {
4149 for (j = 0; j < MAX_LITTLENUMS; j++)
4150 {
4151 if (words[j] != fp_values[i][j])
4152 break;
4153 }
4154
4155 if (j == MAX_LITTLENUMS)
4156 {
4157 *str = input_line_pointer;
4158 input_line_pointer = save_in;
4159 return i + 8;
4160 }
4161 }
4162 }
4163 }
4164
4165 *str = input_line_pointer;
4166 input_line_pointer = save_in;
4167 inst.error = _("invalid FPA immediate expression");
4168 return FAIL;
4169 }
4170
4171 /* Returns 1 if a number has "quarter-precision" float format
4172 0baBbbbbbc defgh000 00000000 00000000. */
4173
4174 static int
4175 is_quarter_float (unsigned imm)
4176 {
4177 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4178 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4179 }
4180
4181 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4182 0baBbbbbbc defgh000 00000000 00000000.
4183 The zero and minus-zero cases need special handling, since they can't be
4184 encoded in the "quarter-precision" float format, but can nonetheless be
4185 loaded as integer constants. */
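/* For example, 1.0 (0x3f800000) and -2.0 (0xc0000000) match the pattern
   above and are accepted; +0.0 and -0.0 are accepted via the special case
   below; something like 1.1 (0x3f8ccccd) is rejected.  */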
4186
4187 static unsigned
4188 parse_qfloat_immediate (char **ccp, int *immed)
4189 {
4190 char *str = *ccp;
4191 char *fpnum;
4192 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4193 int found_fpchar = 0;
4194
4195 skip_past_char (&str, '#');
4196
4197 /* We must not accidentally parse an integer as a floating-point number. Make
4198 sure that the value we parse is not an integer by checking for special
4199 characters '.' or 'e'.
4200 FIXME: This is a horrible hack, but doing better is tricky because type
4201 information isn't in a very usable state at parse time. */
4202 fpnum = str;
4203 skip_whitespace (fpnum);
4204
4205 if (strncmp (fpnum, "0x", 2) == 0)
4206 return FAIL;
4207 else
4208 {
4209 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4210 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4211 {
4212 found_fpchar = 1;
4213 break;
4214 }
4215
4216 if (!found_fpchar)
4217 return FAIL;
4218 }
4219
4220 if ((str = atof_ieee (str, 's', words)) != NULL)
4221 {
4222 unsigned fpword = 0;
4223 int i;
4224
4225 /* Our FP word must be 32 bits (single-precision FP). */
4226 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4227 {
4228 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4229 fpword |= words[i];
4230 }
4231
4232 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4233 *immed = fpword;
4234 else
4235 return FAIL;
4236
4237 *ccp = str;
4238
4239 return SUCCESS;
4240 }
4241
4242 return FAIL;
4243 }
4244
4245 /* Shift operands. */
4246 enum shift_kind
4247 {
4248 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4249 };
4250
4251 struct asm_shift_name
4252 {
4253 const char *name;
4254 enum shift_kind kind;
4255 };
4256
4257 /* Third argument to parse_shift. */
4258 enum parse_shift_mode
4259 {
4260 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4261 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4262 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4263 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4264 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4265 };
4266
4267 /* Parse a <shift> specifier on an ARM data processing instruction.
4268 This has three forms:
4269
4270 (LSL|LSR|ASL|ASR|ROR) Rs
4271 (LSL|LSR|ASL|ASR|ROR) #imm
4272 RRX
4273
4274 Note that ASL is assimilated to LSL in the instruction encoding, and
4275 RRX to ROR #0 (which cannot be written as such). */
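
/* Typical uses of the three forms above are "add r0, r1, r2, lsl #3",
   "mov r0, r1, lsl r2" and "mov r0, r1, rrx"; "asl" is accepted as a
   synonym for "lsl".  */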
4276
4277 static int
4278 parse_shift (char **str, int i, enum parse_shift_mode mode)
4279 {
4280 const struct asm_shift_name *shift_name;
4281 enum shift_kind shift;
4282 char *s = *str;
4283 char *p = s;
4284 int reg;
4285
4286 for (p = *str; ISALPHA (*p); p++)
4287 ;
4288
4289 if (p == *str)
4290 {
4291 inst.error = _("shift expression expected");
4292 return FAIL;
4293 }
4294
4295 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4296
4297 if (shift_name == NULL)
4298 {
4299 inst.error = _("shift expression expected");
4300 return FAIL;
4301 }
4302
4303 shift = shift_name->kind;
4304
4305 switch (mode)
4306 {
4307 case NO_SHIFT_RESTRICT:
4308 case SHIFT_IMMEDIATE: break;
4309
4310 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4311 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4312 {
4313 inst.error = _("'LSL' or 'ASR' required");
4314 return FAIL;
4315 }
4316 break;
4317
4318 case SHIFT_LSL_IMMEDIATE:
4319 if (shift != SHIFT_LSL)
4320 {
4321 inst.error = _("'LSL' required");
4322 return FAIL;
4323 }
4324 break;
4325
4326 case SHIFT_ASR_IMMEDIATE:
4327 if (shift != SHIFT_ASR)
4328 {
4329 inst.error = _("'ASR' required");
4330 return FAIL;
4331 }
4332 break;
4333
4334 default: abort ();
4335 }
4336
4337 if (shift != SHIFT_RRX)
4338 {
4339 /* Whitespace can appear here if the next thing is a bare digit. */
4340 skip_whitespace (p);
4341
4342 if (mode == NO_SHIFT_RESTRICT
4343 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4344 {
4345 inst.operands[i].imm = reg;
4346 inst.operands[i].immisreg = 1;
4347 }
4348 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4349 return FAIL;
4350 }
4351 inst.operands[i].shift_kind = shift;
4352 inst.operands[i].shifted = 1;
4353 *str = p;
4354 return SUCCESS;
4355 }
4356
4357 /* Parse a <shifter_operand> for an ARM data processing instruction:
4358
4359 #<immediate>
4360 #<immediate>, <rotate>
4361 <Rm>
4362 <Rm>, <shift>
4363
4364 where <shift> is defined by parse_shift above, and <rotate> is a
4365 multiple of 2 between 0 and 30. Validation of immediate operands
4366 is deferred to md_apply_fix. */
4367
4368 static int
4369 parse_shifter_operand (char **str, int i)
4370 {
4371 int value;
4372 expressionS expr;
4373
4374 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4375 {
4376 inst.operands[i].reg = value;
4377 inst.operands[i].isreg = 1;
4378
4379 /* parse_shift will override this if appropriate */
4380 inst.reloc.exp.X_op = O_constant;
4381 inst.reloc.exp.X_add_number = 0;
4382
4383 if (skip_past_comma (str) == FAIL)
4384 return SUCCESS;
4385
4386 /* Shift operation on register. */
4387 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4388 }
4389
4390 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4391 return FAIL;
4392
4393 if (skip_past_comma (str) == SUCCESS)
4394 {
4395 /* #x, y -- i.e. explicit rotation by y. */
4396 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4397 return FAIL;
4398
4399 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4400 {
4401 inst.error = _("constant expression expected");
4402 return FAIL;
4403 }
4404
4405 value = expr.X_add_number;
4406 if (value < 0 || value > 30 || value % 2 != 0)
4407 {
4408 inst.error = _("invalid rotation");
4409 return FAIL;
4410 }
4411 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4412 {
4413 inst.error = _("invalid constant");
4414 return FAIL;
4415 }
4416
4417 /* Convert to decoded value. md_apply_fix will put it back. */
4418 inst.reloc.exp.X_add_number
4419 = (((inst.reloc.exp.X_add_number << (32 - value))
4420 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4421 }
4422
4423 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4424 inst.reloc.pc_rel = 0;
4425 return SUCCESS;
4426 }
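
/* For example, "#255, 8" stores 255 rotated right by 8, i.e.
   0xff000000, in inst.reloc.exp; md_apply_fix later re-encodes that as
   the 8-bit constant 0xff with rotation field 4 (rotate right by
   2 * 4 = 8).  */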
4427
4428 /* Group relocation information. Each entry in the table contains the
4429 textual name of the relocation, as it may appear in assembler source,
4430 which must end with a colon.
4431 Along with this textual name are the relocation codes to be used if
4432 the corresponding instruction is an ALU instruction (ADD or SUB only),
4433 an LDR, an LDRS, or an LDC. */
4434
4435 struct group_reloc_table_entry
4436 {
4437 const char *name;
4438 int alu_code;
4439 int ldr_code;
4440 int ldrs_code;
4441 int ldc_code;
4442 };
4443
4444 typedef enum
4445 {
4446 /* Varieties of non-ALU group relocation. */
4447
4448 GROUP_LDR,
4449 GROUP_LDRS,
4450 GROUP_LDC
4451 } group_reloc_type;
4452
4453 static struct group_reloc_table_entry group_reloc_table[] =
4454 { /* Program counter relative: */
4455 { "pc_g0_nc",
4456 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4457 0, /* LDR */
4458 0, /* LDRS */
4459 0 }, /* LDC */
4460 { "pc_g0",
4461 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4462 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4463 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4464 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4465 { "pc_g1_nc",
4466 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4467 0, /* LDR */
4468 0, /* LDRS */
4469 0 }, /* LDC */
4470 { "pc_g1",
4471 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4472 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4473 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4474 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4475 { "pc_g2",
4476 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4477 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4478 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4479 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4480 /* Section base relative */
4481 { "sb_g0_nc",
4482 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4483 0, /* LDR */
4484 0, /* LDRS */
4485 0 }, /* LDC */
4486 { "sb_g0",
4487 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4488 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4489 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4490 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4491 { "sb_g1_nc",
4492 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4493 0, /* LDR */
4494 0, /* LDRS */
4495 0 }, /* LDC */
4496 { "sb_g1",
4497 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4498 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4499 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4500 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4501 { "sb_g2",
4502 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4503 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4504 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4505 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4506
4507 /* Given the address of a pointer pointing to the textual name of a group
4508 relocation as may appear in assembler source, attempt to find its details
4509 in group_reloc_table. The pointer will be updated to the character after
4510 the trailing colon. On failure, FAIL will be returned; SUCCESS
4511 otherwise. On success, *entry will be updated to point at the relevant
4512 group_reloc_table entry. */
4513
4514 static int
4515 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4516 {
4517 unsigned int i;
4518 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4519 {
4520 int length = strlen (group_reloc_table[i].name);
4521
4522 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4523 (*str)[length] == ':')
4524 {
4525 *out = &group_reloc_table[i];
4526 *str += (length + 1);
4527 return SUCCESS;
4528 }
4529 }
4530
4531 return FAIL;
4532 }
4533
4534 /* Parse a <shifter_operand> for an ARM data processing instruction
4535 (as for parse_shifter_operand) where group relocations are allowed:
4536
4537 #<immediate>
4538 #<immediate>, <rotate>
4539 #:<group_reloc>:<expression>
4540 <Rm>
4541 <Rm>, <shift>
4542
4543 where <group_reloc> is one of the strings defined in group_reloc_table.
4544 The hashes are optional.
4545
4546 Everything else is as for parse_shifter_operand. */
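
/* In assembler source this is typically written something like
   "add r0, r0, #:pc_g0_nc:(sym)"; the exact expression syntax accepted
   after the second colon is whatever my_get_expression allows.  */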
4547
4548 static parse_operand_result
4549 parse_shifter_operand_group_reloc (char **str, int i)
4550 {
4551 /* Determine if we have the sequence of characters #: or just :
4552 coming next. If we do, then we check for a group relocation.
4553 If we don't, punt the whole lot to parse_shifter_operand. */
4554
4555 if (((*str)[0] == '#' && (*str)[1] == ':')
4556 || (*str)[0] == ':')
4557 {
4558 struct group_reloc_table_entry *entry;
4559
4560 if ((*str)[0] == '#')
4561 (*str) += 2;
4562 else
4563 (*str)++;
4564
4565 /* Try to parse a group relocation. Anything else is an error. */
4566 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4567 {
4568 inst.error = _("unknown group relocation");
4569 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4570 }
4571
4572 /* We now have the group relocation table entry corresponding to
4573 the name in the assembler source. Next, we parse the expression. */
4574 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4575 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4576
4577 /* Record the relocation type (always the ALU variant here). */
4578 inst.reloc.type = entry->alu_code;
4579 assert (inst.reloc.type != 0);
4580
4581 return PARSE_OPERAND_SUCCESS;
4582 }
4583 else
4584 return parse_shifter_operand (str, i) == SUCCESS
4585 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4586
4587 /* Never reached. */
4588 }
4589
4590 /* Parse all forms of an ARM address expression. Information is written
4591 to inst.operands[i] and/or inst.reloc.
4592
4593 Preindexed addressing (.preind=1):
4594
4595 [Rn, #offset] .reg=Rn .reloc.exp=offset
4596 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4597 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4598 .shift_kind=shift .reloc.exp=shift_imm
4599
4600 These three may have a trailing ! which causes .writeback to be set also.
4601
4602 Postindexed addressing (.postind=1, .writeback=1):
4603
4604 [Rn], #offset .reg=Rn .reloc.exp=offset
4605 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4606 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4607 .shift_kind=shift .reloc.exp=shift_imm
4608
4609 Unindexed addressing (.preind=0, .postind=0):
4610
4611 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4612
4613 Other:
4614
4615 [Rn]{!} shorthand for [Rn,#0]{!}
4616 =immediate .isreg=0 .reloc.exp=immediate
4617 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4618
4619 It is the caller's responsibility to check for addressing modes not
4620 supported by the instruction, and to set inst.reloc.type. */
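
/* Concrete examples of the forms above: "[r1, #4]!" (pre-indexed with
   writeback), "[r1], r2, lsl #2" (post-indexed), "=0x12345678" (the
   load-constant pseudo-op), and a bare "label" (PC-relative).  */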
4621
4622 static parse_operand_result
4623 parse_address_main (char **str, int i, int group_relocations,
4624 group_reloc_type group_type)
4625 {
4626 char *p = *str;
4627 int reg;
4628
4629 if (skip_past_char (&p, '[') == FAIL)
4630 {
4631 if (skip_past_char (&p, '=') == FAIL)
4632 {
4633 /* bare address - translate to PC-relative offset */
4634 inst.reloc.pc_rel = 1;
4635 inst.operands[i].reg = REG_PC;
4636 inst.operands[i].isreg = 1;
4637 inst.operands[i].preind = 1;
4638 }
4639 /* else a load-constant pseudo op, no special treatment needed here */
4640
4641 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4642 return PARSE_OPERAND_FAIL;
4643
4644 *str = p;
4645 return PARSE_OPERAND_SUCCESS;
4646 }
4647
4648 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4649 {
4650 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4651 return PARSE_OPERAND_FAIL;
4652 }
4653 inst.operands[i].reg = reg;
4654 inst.operands[i].isreg = 1;
4655
4656 if (skip_past_comma (&p) == SUCCESS)
4657 {
4658 inst.operands[i].preind = 1;
4659
4660 if (*p == '+') p++;
4661 else if (*p == '-') p++, inst.operands[i].negative = 1;
4662
4663 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4664 {
4665 inst.operands[i].imm = reg;
4666 inst.operands[i].immisreg = 1;
4667
4668 if (skip_past_comma (&p) == SUCCESS)
4669 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4670 return PARSE_OPERAND_FAIL;
4671 }
4672 else if (skip_past_char (&p, ':') == SUCCESS)
4673 {
4674 /* FIXME: '@' should be used here, but it's filtered out by generic
4675 code before we get to see it here. This may be subject to
4676 change. */
4677 expressionS exp;
4678 my_get_expression (&exp, &p, GE_NO_PREFIX);
4679 if (exp.X_op != O_constant)
4680 {
4681 inst.error = _("alignment must be constant");
4682 return PARSE_OPERAND_FAIL;
4683 }
4684 inst.operands[i].imm = exp.X_add_number << 8;
4685 inst.operands[i].immisalign = 1;
4686 /* Alignments are not pre-indexes. */
4687 inst.operands[i].preind = 0;
4688 }
4689 else
4690 {
4691 if (inst.operands[i].negative)
4692 {
4693 inst.operands[i].negative = 0;
4694 p--;
4695 }
4696
4697 if (group_relocations &&
4698 ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4699
4700 {
4701 struct group_reloc_table_entry *entry;
4702
4703 /* Skip over the #: or : sequence. */
4704 if (*p == '#')
4705 p += 2;
4706 else
4707 p++;
4708
4709 /* Try to parse a group relocation. Anything else is an
4710 error. */
4711 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4712 {
4713 inst.error = _("unknown group relocation");
4714 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4715 }
4716
4717 /* We now have the group relocation table entry corresponding to
4718 the name in the assembler source. Next, we parse the
4719 expression. */
4720 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4721 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4722
4723 /* Record the relocation type. */
4724 switch (group_type)
4725 {
4726 case GROUP_LDR:
4727 inst.reloc.type = entry->ldr_code;
4728 break;
4729
4730 case GROUP_LDRS:
4731 inst.reloc.type = entry->ldrs_code;
4732 break;
4733
4734 case GROUP_LDC:
4735 inst.reloc.type = entry->ldc_code;
4736 break;
4737
4738 default:
4739 assert (0);
4740 }
4741
4742 if (inst.reloc.type == 0)
4743 {
4744 inst.error = _("this group relocation is not allowed on this instruction");
4745 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4746 }
4747 }
4748 else
4749 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4750 return PARSE_OPERAND_FAIL;
4751 }
4752 }
4753
4754 if (skip_past_char (&p, ']') == FAIL)
4755 {
4756 inst.error = _("']' expected");
4757 return PARSE_OPERAND_FAIL;
4758 }
4759
4760 if (skip_past_char (&p, '!') == SUCCESS)
4761 inst.operands[i].writeback = 1;
4762
4763 else if (skip_past_comma (&p) == SUCCESS)
4764 {
4765 if (skip_past_char (&p, '{') == SUCCESS)
4766 {
4767 /* [Rn], {expr} - unindexed, with option */
4768 if (parse_immediate (&p, &inst.operands[i].imm,
4769 0, 255, TRUE) == FAIL)
4770 return PARSE_OPERAND_FAIL;
4771
4772 if (skip_past_char (&p, '}') == FAIL)
4773 {
4774 inst.error = _("'}' expected at end of 'option' field");
4775 return PARSE_OPERAND_FAIL;
4776 }
4777 if (inst.operands[i].preind)
4778 {
4779 inst.error = _("cannot combine index with option");
4780 return PARSE_OPERAND_FAIL;
4781 }
4782 *str = p;
4783 return PARSE_OPERAND_SUCCESS;
4784 }
4785 else
4786 {
4787 inst.operands[i].postind = 1;
4788 inst.operands[i].writeback = 1;
4789
4790 if (inst.operands[i].preind)
4791 {
4792 inst.error = _("cannot combine pre- and post-indexing");
4793 return PARSE_OPERAND_FAIL;
4794 }
4795
4796 if (*p == '+') p++;
4797 else if (*p == '-') p++, inst.operands[i].negative = 1;
4798
4799 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4800 {
4801 /* We might be using the immediate for alignment already. If we
4802 are, OR the register number into the low-order bits. */
4803 if (inst.operands[i].immisalign)
4804 inst.operands[i].imm |= reg;
4805 else
4806 inst.operands[i].imm = reg;
4807 inst.operands[i].immisreg = 1;
4808
4809 if (skip_past_comma (&p) == SUCCESS)
4810 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4811 return PARSE_OPERAND_FAIL;
4812 }
4813 else
4814 {
4815 if (inst.operands[i].negative)
4816 {
4817 inst.operands[i].negative = 0;
4818 p--;
4819 }
4820 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4821 return PARSE_OPERAND_FAIL;
4822 }
4823 }
4824 }
4825
4826 /* If at this point neither .preind nor .postind is set, we have a
4827 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4828 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4829 {
4830 inst.operands[i].preind = 1;
4831 inst.reloc.exp.X_op = O_constant;
4832 inst.reloc.exp.X_add_number = 0;
4833 }
4834 *str = p;
4835 return PARSE_OPERAND_SUCCESS;
4836 }
4837
4838 static int
4839 parse_address (char **str, int i)
4840 {
4841 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4842 ? SUCCESS : FAIL;
4843 }
4844
4845 static parse_operand_result
4846 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4847 {
4848 return parse_address_main (str, i, 1, type);
4849 }
4850
4851 /* Parse an operand for a MOVW or MOVT instruction. */
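/* E.g. "movw r0, #:lower16:foo" selects BFD_RELOC_ARM_MOVW and
   "movt r0, #:upper16:foo" selects BFD_RELOC_ARM_MOVT, while a plain
   "movw r0, #0x1234" must be a constant in the range 0..0xffff.  */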
4852 static int
4853 parse_half (char **str)
4854 {
4855 char * p;
4856
4857 p = *str;
4858 skip_past_char (&p, '#');
4859 if (strncasecmp (p, ":lower16:", 9) == 0)
4860 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4861 else if (strncasecmp (p, ":upper16:", 9) == 0)
4862 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4863
4864 if (inst.reloc.type != BFD_RELOC_UNUSED)
4865 {
4866 p += 9;
4867 skip_whitespace(p);
4868 }
4869
4870 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4871 return FAIL;
4872
4873 if (inst.reloc.type == BFD_RELOC_UNUSED)
4874 {
4875 if (inst.reloc.exp.X_op != O_constant)
4876 {
4877 inst.error = _("constant expression expected");
4878 return FAIL;
4879 }
4880 if (inst.reloc.exp.X_add_number < 0
4881 || inst.reloc.exp.X_add_number > 0xffff)
4882 {
4883 inst.error = _("immediate value out of range");
4884 return FAIL;
4885 }
4886 }
4887 *str = p;
4888 return SUCCESS;
4889 }
4890
4891 /* Miscellaneous. */
4892
4893 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4894 or a bitmask suitable to be or-ed into the ARM msr instruction. */
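/* Examples: "CPSR" and "SPSR" with no suffix default to the c and f
   fields, "CPSR_fc" or "SPSR_cxsf" select explicit fields, and any
   other name is looked up in arm_v7m_psr_hsh (the v7-M special
   register names).  */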
4895 static int
4896 parse_psr (char **str)
4897 {
4898 char *p;
4899 unsigned long psr_field;
4900 const struct asm_psr *psr;
4901 char *start;
4902
4903 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4904 feature for ease of use and backwards compatibility. */
4905 p = *str;
4906 if (strncasecmp (p, "SPSR", 4) == 0)
4907 psr_field = SPSR_BIT;
4908 else if (strncasecmp (p, "CPSR", 4) == 0)
4909 psr_field = 0;
4910 else
4911 {
4912 start = p;
4913 do
4914 p++;
4915 while (ISALNUM (*p) || *p == '_');
4916
4917 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4918 if (!psr)
4919 return FAIL;
4920
4921 *str = p;
4922 return psr->field;
4923 }
4924
4925 p += 4;
4926 if (*p == '_')
4927 {
4928 /* A suffix follows. */
4929 p++;
4930 start = p;
4931
4932 do
4933 p++;
4934 while (ISALNUM (*p) || *p == '_');
4935
4936 psr = hash_find_n (arm_psr_hsh, start, p - start);
4937 if (!psr)
4938 goto error;
4939
4940 psr_field |= psr->field;
4941 }
4942 else
4943 {
4944 if (ISALNUM (*p))
4945 goto error; /* Garbage after "[CS]PSR". */
4946
4947 psr_field |= (PSR_c | PSR_f);
4948 }
4949 *str = p;
4950 return psr_field;
4951
4952 error:
4953 inst.error = _("flag for {c}psr instruction expected");
4954 return FAIL;
4955 }
4956
4957 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4958 value suitable for splatting into the AIF field of the instruction. */
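
/* For example, "cpsie if" yields 0x3 (I and F set) and "cpsid a"
   yields 0x4; at least one of a, i, f must be present.  */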
4959
4960 static int
4961 parse_cps_flags (char **str)
4962 {
4963 int val = 0;
4964 int saw_a_flag = 0;
4965 char *s = *str;
4966
4967 for (;;)
4968 switch (*s++)
4969 {
4970 case '\0': case ',':
4971 goto done;
4972
4973 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4974 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4975 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4976
4977 default:
4978 inst.error = _("unrecognized CPS flag");
4979 return FAIL;
4980 }
4981
4982 done:
4983 if (saw_a_flag == 0)
4984 {
4985 inst.error = _("missing CPS flags");
4986 return FAIL;
4987 }
4988
4989 *str = s - 1;
4990 return val;
4991 }
4992
4993 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4994 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4995
4996 static int
4997 parse_endian_specifier (char **str)
4998 {
4999 int little_endian;
5000 char *s = *str;
5001
5002 if (strncasecmp (s, "BE", 2))
5003 little_endian = 0;
5004 else if (strncasecmp (s, "LE", 2))
5005 little_endian = 1;
5006 else
5007 {
5008 inst.error = _("valid endian specifiers are be or le");
5009 return FAIL;
5010 }
5011
5012 if (ISALNUM (s[2]) || s[2] == '_')
5013 {
5014 inst.error = _("valid endian specifiers are be or le");
5015 return FAIL;
5016 }
5017
5018 *str = s + 2;
5019 return little_endian;
5020 }
5021
5022 /* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a value
5023 suitable for poking into the rotate field of an sxt or sxta
5024 instruction, or FAIL on error. */
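
/* E.g. "sxtab r0, r1, r2, ROR #16" parses here and yields 0x2 for the
   rotate field; any other rotation amount is rejected.  */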
5025
5026 static int
5027 parse_ror (char **str)
5028 {
5029 int rot;
5030 char *s = *str;
5031
5032 if (strncasecmp (s, "ROR", 3) == 0)
5033 s += 3;
5034 else
5035 {
5036 inst.error = _("missing rotation field after comma");
5037 return FAIL;
5038 }
5039
5040 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5041 return FAIL;
5042
5043 switch (rot)
5044 {
5045 case 0: *str = s; return 0x0;
5046 case 8: *str = s; return 0x1;
5047 case 16: *str = s; return 0x2;
5048 case 24: *str = s; return 0x3;
5049
5050 default:
5051 inst.error = _("rotation can only be 0, 8, 16, or 24");
5052 return FAIL;
5053 }
5054 }
5055
5056 /* Parse a conditional code (from conds[] below). The value returned is in the
5057 range 0 .. 14, or FAIL. */
5058 static int
5059 parse_cond (char **str)
5060 {
5061 char *p, *q;
5062 const struct asm_cond *c;
5063
5064 p = q = *str;
5065 while (ISALPHA (*q))
5066 q++;
5067
5068 c = hash_find_n (arm_cond_hsh, p, q - p);
5069 if (!c)
5070 {
5071 inst.error = _("condition required");
5072 return FAIL;
5073 }
5074
5075 *str = q;
5076 return c->value;
5077 }
5078
5079 /* Parse an option for a barrier instruction. Returns the encoding for the
5080 option, or FAIL. */
5081 static int
5082 parse_barrier (char **str)
5083 {
5084 char *p, *q;
5085 const struct asm_barrier_opt *o;
5086
5087 p = q = *str;
5088 while (ISALPHA (*q))
5089 q++;
5090
5091 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5092 if (!o)
5093 return FAIL;
5094
5095 *str = q;
5096 return o->value;
5097 }
5098
5099 /* Parse the operands of a table branch instruction. Similar to a memory
5100 operand. */
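/* E.g. "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]"; if a shift is
   present it must be LSL #1, which the X_add_number == 1 check below
   enforces.  */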
5101 static int
5102 parse_tb (char **str)
5103 {
5104 char * p = *str;
5105 int reg;
5106
5107 if (skip_past_char (&p, '[') == FAIL)
5108 {
5109 inst.error = _("'[' expected");
5110 return FAIL;
5111 }
5112
5113 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5114 {
5115 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5116 return FAIL;
5117 }
5118 inst.operands[0].reg = reg;
5119
5120 if (skip_past_comma (&p) == FAIL)
5121 {
5122 inst.error = _("',' expected");
5123 return FAIL;
5124 }
5125
5126 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5127 {
5128 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5129 return FAIL;
5130 }
5131 inst.operands[0].imm = reg;
5132
5133 if (skip_past_comma (&p) == SUCCESS)
5134 {
5135 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5136 return FAIL;
5137 if (inst.reloc.exp.X_add_number != 1)
5138 {
5139 inst.error = _("invalid shift");
5140 return FAIL;
5141 }
5142 inst.operands[0].shifted = 1;
5143 }
5144
5145 if (skip_past_char (&p, ']') == FAIL)
5146 {
5147 inst.error = _("']' expected");
5148 return FAIL;
5149 }
5150 *str = p;
5151 return SUCCESS;
5152 }
5153
5154 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5155 information on the types the operands can take and how they are encoded.
5156 Up to four operands may be read; this function handles setting the
5157 ".present" field for each read operand itself.
5158 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5159 else returns FAIL. */
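
/* Representative forms handled here include "vmov d0, r2, r3",
   "vmov.f32 s0, #1.0", "vmov.i32 q0, #0x12" and "vmov.32 d0[1], r2";
   see do_neon_mov for how each case is encoded.  */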
5160
5161 static int
5162 parse_neon_mov (char **str, int *which_operand)
5163 {
5164 int i = *which_operand, val;
5165 enum arm_reg_type rtype;
5166 char *ptr = *str;
5167 struct neon_type_el optype;
5168
5169 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5170 {
5171 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5172 inst.operands[i].reg = val;
5173 inst.operands[i].isscalar = 1;
5174 inst.operands[i].vectype = optype;
5175 inst.operands[i++].present = 1;
5176
5177 if (skip_past_comma (&ptr) == FAIL)
5178 goto wanted_comma;
5179
5180 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5181 goto wanted_arm;
5182
5183 inst.operands[i].reg = val;
5184 inst.operands[i].isreg = 1;
5185 inst.operands[i].present = 1;
5186 }
5187 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5188 != FAIL)
5189 {
5190 /* Cases 0, 1, 2, 3, 5 (D only). */
5191 if (skip_past_comma (&ptr) == FAIL)
5192 goto wanted_comma;
5193
5194 inst.operands[i].reg = val;
5195 inst.operands[i].isreg = 1;
5196 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5197 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5198 inst.operands[i].isvec = 1;
5199 inst.operands[i].vectype = optype;
5200 inst.operands[i++].present = 1;
5201
5202 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5203 {
5204 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5205 Case 13: VMOV <Sd>, <Rm> */
5206 inst.operands[i].reg = val;
5207 inst.operands[i].isreg = 1;
5208 inst.operands[i].present = 1;
5209
5210 if (rtype == REG_TYPE_NQ)
5211 {
5212 first_error (_("can't use Neon quad register here"));
5213 return FAIL;
5214 }
5215 else if (rtype != REG_TYPE_VFS)
5216 {
5217 i++;
5218 if (skip_past_comma (&ptr) == FAIL)
5219 goto wanted_comma;
5220 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5221 goto wanted_arm;
5222 inst.operands[i].reg = val;
5223 inst.operands[i].isreg = 1;
5224 inst.operands[i].present = 1;
5225 }
5226 }
5227 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5228 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5229 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5230 Case 10: VMOV.F32 <Sd>, #<imm>
5231 Case 11: VMOV.F64 <Dd>, #<imm> */
5232 inst.operands[i].immisfloat = 1;
5233 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5234 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5235 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5236 ;
5237 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5238 &optype)) != FAIL)
5239 {
5240 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5241 Case 1: VMOV<c><q> <Dd>, <Dm>
5242 Case 8: VMOV.F32 <Sd>, <Sm>
5243 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5244
5245 inst.operands[i].reg = val;
5246 inst.operands[i].isreg = 1;
5247 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5248 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5249 inst.operands[i].isvec = 1;
5250 inst.operands[i].vectype = optype;
5251 inst.operands[i].present = 1;
5252
5253 if (skip_past_comma (&ptr) == SUCCESS)
5254 {
5255 /* Case 15. */
5256 i++;
5257
5258 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5259 goto wanted_arm;
5260
5261 inst.operands[i].reg = val;
5262 inst.operands[i].isreg = 1;
5263 inst.operands[i++].present = 1;
5264
5265 if (skip_past_comma (&ptr) == FAIL)
5266 goto wanted_comma;
5267
5268 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5269 goto wanted_arm;
5270
5271 inst.operands[i].reg = val;
5272 inst.operands[i].isreg = 1;
5273 inst.operands[i++].present = 1;
5274 }
5275 }
5276 else
5277 {
5278 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5279 return FAIL;
5280 }
5281 }
5282 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5283 {
5284 /* Cases 6, 7. */
5285 inst.operands[i].reg = val;
5286 inst.operands[i].isreg = 1;
5287 inst.operands[i++].present = 1;
5288
5289 if (skip_past_comma (&ptr) == FAIL)
5290 goto wanted_comma;
5291
5292 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5293 {
5294 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5295 inst.operands[i].reg = val;
5296 inst.operands[i].isscalar = 1;
5297 inst.operands[i].present = 1;
5298 inst.operands[i].vectype = optype;
5299 }
5300 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5301 {
5302 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5303 inst.operands[i].reg = val;
5304 inst.operands[i].isreg = 1;
5305 inst.operands[i++].present = 1;
5306
5307 if (skip_past_comma (&ptr) == FAIL)
5308 goto wanted_comma;
5309
5310 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5311 == FAIL)
5312 {
5313 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5314 return FAIL;
5315 }
5316
5317 inst.operands[i].reg = val;
5318 inst.operands[i].isreg = 1;
5319 inst.operands[i].isvec = 1;
5320 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5321 inst.operands[i].vectype = optype;
5322 inst.operands[i].present = 1;
5323
5324 if (rtype == REG_TYPE_VFS)
5325 {
5326 /* Case 14. */
5327 i++;
5328 if (skip_past_comma (&ptr) == FAIL)
5329 goto wanted_comma;
5330 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5331 &optype)) == FAIL)
5332 {
5333 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5334 return FAIL;
5335 }
5336 inst.operands[i].reg = val;
5337 inst.operands[i].isreg = 1;
5338 inst.operands[i].isvec = 1;
5339 inst.operands[i].issingle = 1;
5340 inst.operands[i].vectype = optype;
5341 inst.operands[i].present = 1;
5342 }
5343 }
5344 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5345 != FAIL)
5346 {
5347 /* Case 13. */
5348 inst.operands[i].reg = val;
5349 inst.operands[i].isreg = 1;
5350 inst.operands[i].isvec = 1;
5351 inst.operands[i].issingle = 1;
5352 inst.operands[i].vectype = optype;
5353 inst.operands[i++].present = 1;
5354 }
5355 }
5356 else
5357 {
5358 first_error (_("parse error"));
5359 return FAIL;
5360 }
5361
5362 /* Successfully parsed the operands. Update args. */
5363 *which_operand = i;
5364 *str = ptr;
5365 return SUCCESS;
5366
5367 wanted_comma:
5368 first_error (_("expected comma"));
5369 return FAIL;
5370
5371 wanted_arm:
5372 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5373 return FAIL;
5374 }
5375
5376 /* Matcher codes for parse_operands. */
5377 enum operand_parse_code
5378 {
5379 OP_stop, /* end of line */
5380
5381 OP_RR, /* ARM register */
5382 OP_RRnpc, /* ARM register, not r15 */
5383 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5384 OP_RRw, /* ARM register, not r15, optional trailing ! */
5385 OP_RCP, /* Coprocessor number */
5386 OP_RCN, /* Coprocessor register */
5387 OP_RF, /* FPA register */
5388 OP_RVS, /* VFP single precision register */
5389 OP_RVD, /* VFP double precision register (0..15) */
5390 OP_RND, /* Neon double precision register (0..31) */
5391 OP_RNQ, /* Neon quad precision register */
5392 OP_RVSD, /* VFP single or double precision register */
5393 OP_RNDQ, /* Neon double or quad precision register */
5394 OP_RNSDQ, /* Neon single, double or quad precision register */
5395 OP_RNSC, /* Neon scalar D[X] */
5396 OP_RVC, /* VFP control register */
5397 OP_RMF, /* Maverick F register */
5398 OP_RMD, /* Maverick D register */
5399 OP_RMFX, /* Maverick FX register */
5400 OP_RMDX, /* Maverick DX register */
5401 OP_RMAX, /* Maverick AX register */
5402 OP_RMDS, /* Maverick DSPSC register */
5403 OP_RIWR, /* iWMMXt wR register */
5404 OP_RIWC, /* iWMMXt wC register */
5405 OP_RIWG, /* iWMMXt wCG register */
5406 OP_RXA, /* XScale accumulator register */
5407
5408 OP_REGLST, /* ARM register list */
5409 OP_VRSLST, /* VFP single-precision register list */
5410 OP_VRDLST, /* VFP double-precision register list */
5411 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5412 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5413 OP_NSTRLST, /* Neon element/structure list */
5414
5415 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5416 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5417 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5418 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5419 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5420 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5421 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5422 OP_VMOV, /* Neon VMOV operands. */
5423 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5424 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5425 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5426
5427 OP_I0, /* immediate zero */
5428 OP_I7, /* immediate value 0 .. 7 */
5429 OP_I15, /* 0 .. 15 */
5430 OP_I16, /* 1 .. 16 */
5431 OP_I16z, /* 0 .. 16 */
5432 OP_I31, /* 0 .. 31 */
5433 OP_I31w, /* 0 .. 31, optional trailing ! */
5434 OP_I32, /* 1 .. 32 */
5435 OP_I32z, /* 0 .. 32 */
5436 OP_I63, /* 0 .. 63 */
5437 OP_I63s, /* -64 .. 63 */
5438 OP_I64, /* 1 .. 64 */
5439 OP_I64z, /* 0 .. 64 */
5440 OP_I255, /* 0 .. 255 */
5441
5442 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5443 OP_I7b, /* 0 .. 7 */
5444 OP_I15b, /* 0 .. 15 */
5445 OP_I31b, /* 0 .. 31 */
5446
5447 OP_SH, /* shifter operand */
5448 OP_SHG, /* shifter operand with possible group relocation */
5449 OP_ADDR, /* Memory address expression (any mode) */
5450 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5451 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5452 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5453 OP_EXP, /* arbitrary expression */
5454 OP_EXPi, /* same, with optional immediate prefix */
5455 OP_EXPr, /* same, with optional relocation suffix */
5456 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5457
5458 OP_CPSF, /* CPS flags */
5459 OP_ENDI, /* Endianness specifier */
5460 OP_PSR, /* CPSR/SPSR mask for msr */
5461 OP_COND, /* conditional code */
5462 OP_TB, /* Table branch. */
5463
5464 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5465 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5466
5467 OP_RRnpc_I0, /* ARM register or literal 0 */
5468 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5469 OP_RR_EXi, /* ARM register or expression with imm prefix */
5470 OP_RF_IF, /* FPA register or immediate */
5471 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5472 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5473
5474 /* Optional operands. */
5475 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5476 OP_oI31b, /* 0 .. 31 */
5477 OP_oI32b, /* 1 .. 32 */
5478 OP_oIffffb, /* 0 .. 65535 */
5479 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5480
5481 OP_oRR, /* ARM register */
5482 OP_oRRnpc, /* ARM register, not the PC */
5483 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5484 OP_oRND, /* Optional Neon double precision register */
5485 OP_oRNQ, /* Optional Neon quad precision register */
5486 OP_oRNDQ, /* Optional Neon double or quad precision register */
5487 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5488 OP_oSHll, /* LSL immediate */
5489 OP_oSHar, /* ASR immediate */
5490 OP_oSHllar, /* LSL or ASR immediate */
5491 OP_oROR, /* ROR 0/8/16/24 */
5492 OP_oBARRIER, /* Option argument for a barrier instruction. */
5493
5494 OP_FIRST_OPTIONAL = OP_oI7b
5495 };
5496
5497 /* Generic instruction operand parser. This does no encoding and no
5498 semantic validation; it merely squirrels values away in the inst
5499 structure. Returns SUCCESS or FAIL depending on whether the
5500 specified grammar matched. */
5501 static int
5502 parse_operands (char *str, const unsigned char *pattern)
5503 {
5504 unsigned const char *upat = pattern;
5505 char *backtrack_pos = 0;
5506 const char *backtrack_error = 0;
5507 int i, val, backtrack_index = 0;
5508 enum arm_reg_type rtype;
5509 parse_operand_result result;
5510
5511 #define po_char_or_fail(chr) do { \
5512 if (skip_past_char (&str, chr) == FAIL) \
5513 goto bad_args; \
5514 } while (0)
5515
5516 #define po_reg_or_fail(regtype) do { \
5517 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5518 &inst.operands[i].vectype); \
5519 if (val == FAIL) \
5520 { \
5521 first_error (_(reg_expected_msgs[regtype])); \
5522 goto failure; \
5523 } \
5524 inst.operands[i].reg = val; \
5525 inst.operands[i].isreg = 1; \
5526 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5527 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5528 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5529 || rtype == REG_TYPE_VFD \
5530 || rtype == REG_TYPE_NQ); \
5531 } while (0)
5532
5533 #define po_reg_or_goto(regtype, label) do { \
5534 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5535 &inst.operands[i].vectype); \
5536 if (val == FAIL) \
5537 goto label; \
5538 \
5539 inst.operands[i].reg = val; \
5540 inst.operands[i].isreg = 1; \
5541 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5542 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5543 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5544 || rtype == REG_TYPE_VFD \
5545 || rtype == REG_TYPE_NQ); \
5546 } while (0)
5547
5548 #define po_imm_or_fail(min, max, popt) do { \
5549 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5550 goto failure; \
5551 inst.operands[i].imm = val; \
5552 } while (0)
5553
5554 #define po_scalar_or_goto(elsz, label) do { \
5555 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5556 if (val == FAIL) \
5557 goto label; \
5558 inst.operands[i].reg = val; \
5559 inst.operands[i].isscalar = 1; \
5560 } while (0)
5561
5562 #define po_misc_or_fail(expr) do { \
5563 if (expr) \
5564 goto failure; \
5565 } while (0)
5566
5567 #define po_misc_or_fail_no_backtrack(expr) do { \
5568 result = expr; \
5569 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5570 backtrack_pos = 0; \
5571 if (result != PARSE_OPERAND_SUCCESS) \
5572 goto failure; \
5573 } while (0)
5574
5575 skip_whitespace (str);
5576
5577 for (i = 0; upat[i] != OP_stop; i++)
5578 {
5579 if (upat[i] >= OP_FIRST_OPTIONAL)
5580 {
5581 /* Remember where we are in case we need to backtrack. */
5582 assert (!backtrack_pos);
5583 backtrack_pos = str;
5584 backtrack_error = inst.error;
5585 backtrack_index = i;
5586 }
5587
5588 if (i > 0 && (i > 1 || inst.operands[0].present))
5589 po_char_or_fail (',');
5590
5591 switch (upat[i])
5592 {
5593 /* Registers */
5594 case OP_oRRnpc:
5595 case OP_RRnpc:
5596 case OP_oRR:
5597 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5598 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5599 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5600 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5601 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5602 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5603 case OP_oRND:
5604 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5605 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5606 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5607 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5608 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5609 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5610 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5611 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5612 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5613 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5614 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5615 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5616 case OP_oRNQ:
5617 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5618 case OP_oRNDQ:
5619 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5620 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5621 case OP_oRNSDQ:
5622 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5623
5624 /* Neon scalar. Using an element size of 8 means that some invalid
5625 scalars are accepted here, so deal with those in later code. */
5626 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5627
5628 /* WARNING: We can expand to two operands here. This has the potential
5629 to totally confuse the backtracking mechanism! It will be OK at
5630 least as long as we don't try to use optional args as well,
5631 though. */
5632 case OP_NILO:
5633 {
5634 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5635 inst.operands[i].present = 1;
5636 i++;
5637 skip_past_comma (&str);
5638 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5639 break;
5640 one_reg_only:
5641 /* Optional register operand was omitted. Unfortunately, it's in
5642 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5643 here (this is a bit grotty). */
5644 inst.operands[i] = inst.operands[i-1];
5645 inst.operands[i-1].present = 0;
5646 break;
5647 try_imm:
5648 /* There's a possibility of getting a 64-bit immediate here, so
5649 we need special handling. */
5650 if (parse_big_immediate (&str, i) == FAIL)
5651 {
5652 inst.error = _("immediate value is out of range");
5653 goto failure;
5654 }
5655 }
5656 break;
5657
5658 case OP_RNDQ_I0:
5659 {
5660 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5661 break;
5662 try_imm0:
5663 po_imm_or_fail (0, 0, TRUE);
5664 }
5665 break;
5666
5667 case OP_RVSD_I0:
5668 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5669 break;
5670
5671 case OP_RR_RNSC:
5672 {
5673 po_scalar_or_goto (8, try_rr);
5674 break;
5675 try_rr:
5676 po_reg_or_fail (REG_TYPE_RN);
5677 }
5678 break;
5679
5680 case OP_RNSDQ_RNSC:
5681 {
5682 po_scalar_or_goto (8, try_nsdq);
5683 break;
5684 try_nsdq:
5685 po_reg_or_fail (REG_TYPE_NSDQ);
5686 }
5687 break;
5688
5689 case OP_RNDQ_RNSC:
5690 {
5691 po_scalar_or_goto (8, try_ndq);
5692 break;
5693 try_ndq:
5694 po_reg_or_fail (REG_TYPE_NDQ);
5695 }
5696 break;
5697
5698 case OP_RND_RNSC:
5699 {
5700 po_scalar_or_goto (8, try_vfd);
5701 break;
5702 try_vfd:
5703 po_reg_or_fail (REG_TYPE_VFD);
5704 }
5705 break;
5706
5707 case OP_VMOV:
5708 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5709 not careful then bad things might happen. */
5710 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5711 break;
5712
5713 case OP_RNDQ_IMVNb:
5714 {
5715 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5716 break;
5717 try_mvnimm:
5718 /* There's a possibility of getting a 64-bit immediate here, so
5719 we need special handling. */
5720 if (parse_big_immediate (&str, i) == FAIL)
5721 {
5722 inst.error = _("immediate value is out of range");
5723 goto failure;
5724 }
5725 }
5726 break;
5727
5728 case OP_RNDQ_I63b:
5729 {
5730 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5731 break;
5732 try_shimm:
5733 po_imm_or_fail (0, 63, TRUE);
5734 }
5735 break;
5736
5737 case OP_RRnpcb:
5738 po_char_or_fail ('[');
5739 po_reg_or_fail (REG_TYPE_RN);
5740 po_char_or_fail (']');
5741 break;
5742
5743 case OP_RRw:
5744 case OP_oRRw:
5745 po_reg_or_fail (REG_TYPE_RN);
5746 if (skip_past_char (&str, '!') == SUCCESS)
5747 inst.operands[i].writeback = 1;
5748 break;
5749
5750 /* Immediates */
5751 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5752 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5753 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5754 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5755 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5756 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5757 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5758 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5759 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5760 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5761 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5762 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5763
5764 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5765 case OP_oI7b:
5766 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5767 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5768 case OP_oI31b:
5769 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5770 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5771 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5772
5773 /* Immediate variants */
5774 case OP_oI255c:
5775 po_char_or_fail ('{');
5776 po_imm_or_fail (0, 255, TRUE);
5777 po_char_or_fail ('}');
5778 break;
5779
5780 case OP_I31w:
5781 /* The expression parser chokes on a trailing !, so we have
5782 to find it first and zap it. */
5783 {
5784 char *s = str;
5785 while (*s && *s != ',')
5786 s++;
5787 if (s[-1] == '!')
5788 {
5789 s[-1] = '\0';
5790 inst.operands[i].writeback = 1;
5791 }
5792 po_imm_or_fail (0, 31, TRUE);
5793 if (str == s - 1)
5794 str = s;
5795 }
5796 break;
5797
5798 /* Expressions */
5799 case OP_EXPi: EXPi:
5800 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5801 GE_OPT_PREFIX));
5802 break;
5803
5804 case OP_EXP:
5805 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5806 GE_NO_PREFIX));
5807 break;
5808
5809 case OP_EXPr: EXPr:
5810 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5811 GE_NO_PREFIX));
5812 if (inst.reloc.exp.X_op == O_symbol)
5813 {
5814 val = parse_reloc (&str);
5815 if (val == -1)
5816 {
5817 inst.error = _("unrecognized relocation suffix");
5818 goto failure;
5819 }
5820 else if (val != BFD_RELOC_UNUSED)
5821 {
5822 inst.operands[i].imm = val;
5823 inst.operands[i].hasreloc = 1;
5824 }
5825 }
5826 break;
5827
5828 /* Operand for MOVW or MOVT. */
5829 case OP_HALF:
5830 po_misc_or_fail (parse_half (&str));
5831 break;
5832
5833 /* Register or expression */
5834 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5835 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5836
5837 /* Register or immediate */
5838 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5839 I0: po_imm_or_fail (0, 0, FALSE); break;
5840
5841 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5842 IF:
5843 if (!is_immediate_prefix (*str))
5844 goto bad_args;
5845 str++;
5846 val = parse_fpa_immediate (&str);
5847 if (val == FAIL)
5848 goto failure;
5849 /* FPA immediates are encoded as registers 8-15.
5850 parse_fpa_immediate has already applied the offset. */
5851 inst.operands[i].reg = val;
5852 inst.operands[i].isreg = 1;
5853 break;
5854
5855 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5856 I32z: po_imm_or_fail (0, 32, FALSE); break;
5857
5858 /* Two kinds of register */
5859 case OP_RIWR_RIWC:
5860 {
5861 struct reg_entry *rege = arm_reg_parse_multi (&str);
5862 if (!rege
5863 || (rege->type != REG_TYPE_MMXWR
5864 && rege->type != REG_TYPE_MMXWC
5865 && rege->type != REG_TYPE_MMXWCG))
5866 {
5867 inst.error = _("iWMMXt data or control register expected");
5868 goto failure;
5869 }
5870 inst.operands[i].reg = rege->number;
5871 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5872 }
5873 break;
5874
5875 case OP_RIWC_RIWG:
5876 {
5877 struct reg_entry *rege = arm_reg_parse_multi (&str);
5878 if (!rege
5879 || (rege->type != REG_TYPE_MMXWC
5880 && rege->type != REG_TYPE_MMXWCG))
5881 {
5882 inst.error = _("iWMMXt control register expected");
5883 goto failure;
5884 }
5885 inst.operands[i].reg = rege->number;
5886 inst.operands[i].isreg = 1;
5887 }
5888 break;
5889
5890 /* Misc */
5891 case OP_CPSF: val = parse_cps_flags (&str); break;
5892 case OP_ENDI: val = parse_endian_specifier (&str); break;
5893 case OP_oROR: val = parse_ror (&str); break;
5894 case OP_PSR: val = parse_psr (&str); break;
5895 case OP_COND: val = parse_cond (&str); break;
5896 case OP_oBARRIER:val = parse_barrier (&str); break;
5897
5898 case OP_RVC_PSR:
5899 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5900 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5901 break;
5902 try_psr:
5903 val = parse_psr (&str);
5904 break;
5905
5906 case OP_APSR_RR:
5907 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5908 break;
5909 try_apsr:
5910 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5911 instruction). */
5912 if (strncasecmp (str, "APSR_", 5) == 0)
5913 {
5914 unsigned found = 0;
5915 str += 5;
5916 while (found < 15)
5917 switch (*str++)
5918 {
5919 case 'c': found = (found & 1) ? 16 : found | 1; break;
5920 case 'n': found = (found & 2) ? 16 : found | 2; break;
5921 case 'z': found = (found & 4) ? 16 : found | 4; break;
5922 case 'v': found = (found & 8) ? 16 : found | 8; break;
5923 default: found = 16;
5924 }
5925 if (found != 15)
5926 goto failure;
5927 inst.operands[i].isvec = 1;
5928 }
5929 else
5930 goto failure;
5931 break;
5932
5933 case OP_TB:
5934 po_misc_or_fail (parse_tb (&str));
5935 break;
5936
5937 /* Register lists */
5938 case OP_REGLST:
5939 val = parse_reg_list (&str);
5940 if (*str == '^')
5941 {
5942 inst.operands[1].writeback = 1;
5943 str++;
5944 }
5945 break;
5946
5947 case OP_VRSLST:
5948 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5949 break;
5950
5951 case OP_VRDLST:
5952 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5953 break;
5954
5955 case OP_VRSDLST:
5956 /* Allow Q registers too. */
5957 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5958 REGLIST_NEON_D);
5959 if (val == FAIL)
5960 {
5961 inst.error = NULL;
5962 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5963 REGLIST_VFP_S);
5964 inst.operands[i].issingle = 1;
5965 }
5966 break;
5967
5968 case OP_NRDLST:
5969 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5970 REGLIST_NEON_D);
5971 break;
5972
5973 case OP_NSTRLST:
5974 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5975 &inst.operands[i].vectype);
5976 break;
5977
5978 /* Addressing modes */
5979 case OP_ADDR:
5980 po_misc_or_fail (parse_address (&str, i));
5981 break;
5982
5983 case OP_ADDRGLDR:
5984 po_misc_or_fail_no_backtrack (
5985 parse_address_group_reloc (&str, i, GROUP_LDR));
5986 break;
5987
5988 case OP_ADDRGLDRS:
5989 po_misc_or_fail_no_backtrack (
5990 parse_address_group_reloc (&str, i, GROUP_LDRS));
5991 break;
5992
5993 case OP_ADDRGLDC:
5994 po_misc_or_fail_no_backtrack (
5995 parse_address_group_reloc (&str, i, GROUP_LDC));
5996 break;
5997
5998 case OP_SH:
5999 po_misc_or_fail (parse_shifter_operand (&str, i));
6000 break;
6001
6002 case OP_SHG:
6003 po_misc_or_fail_no_backtrack (
6004 parse_shifter_operand_group_reloc (&str, i));
6005 break;
6006
6007 case OP_oSHll:
6008 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6009 break;
6010
6011 case OP_oSHar:
6012 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6013 break;
6014
6015 case OP_oSHllar:
6016 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6017 break;
6018
6019 default:
6020 as_fatal ("unhandled operand code %d", upat[i]);
6021 }
6022
6023 /* Various value-based sanity checks and shared operations. We
6024 do not signal immediate failures for the register constraints;
6025 this allows a syntax error to take precedence. */
6026 switch (upat[i])
6027 {
6028 case OP_oRRnpc:
6029 case OP_RRnpc:
6030 case OP_RRnpcb:
6031 case OP_RRw:
6032 case OP_oRRw:
6033 case OP_RRnpc_I0:
6034 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6035 inst.error = BAD_PC;
6036 break;
6037
6038 case OP_CPSF:
6039 case OP_ENDI:
6040 case OP_oROR:
6041 case OP_PSR:
6042 case OP_RVC_PSR:
6043 case OP_COND:
6044 case OP_oBARRIER:
6045 case OP_REGLST:
6046 case OP_VRSLST:
6047 case OP_VRDLST:
6048 case OP_VRSDLST:
6049 case OP_NRDLST:
6050 case OP_NSTRLST:
6051 if (val == FAIL)
6052 goto failure;
6053 inst.operands[i].imm = val;
6054 break;
6055
6056 default:
6057 break;
6058 }
6059
6060 /* If we get here, this operand was successfully parsed. */
6061 inst.operands[i].present = 1;
6062 continue;
6063
6064 bad_args:
6065 inst.error = BAD_ARGS;
6066
6067 failure:
6068 if (!backtrack_pos)
6069 {
6070 /* The parse routine should already have set inst.error, but set a
6071 default here just in case. */
6072 if (!inst.error)
6073 inst.error = _("syntax error");
6074 return FAIL;
6075 }
6076
6077 /* Do not backtrack over a trailing optional argument that
6078 absorbed some text. We will only fail again, with the
6079 'garbage following instruction' error message, which is
6080 probably less helpful than the current one. */
6081 if (backtrack_index == i && backtrack_pos != str
6082 && upat[i+1] == OP_stop)
6083 {
6084 if (!inst.error)
6085 inst.error = _("syntax error");
6086 return FAIL;
6087 }
6088
6089 /* Try again, skipping the optional argument at backtrack_pos. */
6090 str = backtrack_pos;
6091 inst.error = backtrack_error;
6092 inst.operands[backtrack_index].present = 0;
6093 i = backtrack_index;
6094 backtrack_pos = 0;
6095 }
6096
6097 /* Check that we have parsed all the arguments. */
6098 if (*str != '\0' && !inst.error)
6099 inst.error = _("garbage following instruction");
6100
6101 return inst.error ? FAIL : SUCCESS;
6102 }
6103
6104 #undef po_char_or_fail
6105 #undef po_reg_or_fail
6106 #undef po_reg_or_goto
6107 #undef po_imm_or_fail
6108 #undef po_scalar_or_goto
6109 \f
6110 /* Shorthand macro for instruction encoding functions issuing errors. */
6111 #define constraint(expr, err) do { \
6112 if (expr) \
6113 { \
6114 inst.error = err; \
6115 return; \
6116 } \
6117 } while (0)
6118
6119 /* Functions for operand encoding. ARM, then Thumb. */
6120
6121 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31)) /* Masked so n == 0 is not an undefined shift by 32. */
6122
6123 /* If VAL can be encoded in the immediate field of an ARM instruction,
6124 return the encoded form. Otherwise, return FAIL. */
6125
6126 static unsigned int
6127 encode_arm_immediate (unsigned int val)
6128 {
6129 unsigned int a, i;
6130
6131 for (i = 0; i < 32; i += 2)
6132 if ((a = rotate_left (val, i)) <= 0xff)
6133 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6134
6135 return FAIL;
6136 }
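
/* For example, 0xff000000 is 0xff rotated right by 8, so it encodes as
   0x4ff: constant 0xff in bits 7..0 with rotation field 4 in bits 11..8
   (rotate right by 2 * 4).  A value like 0x101 spans more than eight
   bits under any even rotation and therefore returns FAIL.  */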
6137
6138 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6139 return the encoded form. Otherwise, return FAIL. */
6140 static unsigned int
6141 encode_thumb32_immediate (unsigned int val)
6142 {
6143 unsigned int a, i;
6144
6145 if (val <= 0xff)
6146 return val;
6147
6148 for (i = 1; i <= 24; i++)
6149 {
6150 a = val >> i;
6151 if ((val & ~(0xff << i)) == 0)
6152 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6153 }
6154
6155 a = val & 0xff;
6156 if (val == ((a << 16) | a))
6157 return 0x100 | a;
6158 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6159 return 0x300 | a;
6160
6161 a = val & 0xff00;
6162 if (val == ((a << 16) | a))
6163 return 0x200 | (a >> 8);
6164
6165 return FAIL;
6166 }
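
/* For example, 0x000000ab encodes as 0x0ab, 0x00ab00ab as 0x1ab,
   0xab00ab00 as 0x2ab and 0xabababab as 0x3ab; 0x00012300 has no
   Thumb-2 modified-immediate form and returns FAIL.  */
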
6167 /* Encode a VFP SP or DP register number into inst.instruction. */
6168
6169 static void
6170 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6171 {
6172 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6173 && reg > 15)
6174 {
6175 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6176 {
6177 if (thumb_mode)
6178 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6179 fpu_vfp_ext_v3);
6180 else
6181 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6182 fpu_vfp_ext_v3);
6183 }
6184 else
6185 {
6186 first_error (_("D register out of range for selected VFP version"));
6187 return;
6188 }
6189 }
6190
6191 switch (pos)
6192 {
6193 case VFP_REG_Sd:
6194 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6195 break;
6196
6197 case VFP_REG_Sn:
6198 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6199 break;
6200
6201 case VFP_REG_Sm:
6202 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6203 break;
6204
6205 case VFP_REG_Dd:
6206 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6207 break;
6208
6209 case VFP_REG_Dn:
6210 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6211 break;
6212
6213 case VFP_REG_Dm:
6214 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6215 break;
6216
6217 default:
6218 abort ();
6219 }
6220 }
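
/* For instance, d17 in the Dd position puts 0x1 in bits 15..12 and sets
   bit 22 (the D extension bit), which is why registers above d15 are
   restricted to VFPv3 above.  */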
6221
6222 /* Encode a <shift> in an ARM-format instruction. The immediate,
6223 if any, is handled by md_apply_fix. */
6224 static void
6225 encode_arm_shift (int i)
6226 {
6227 if (inst.operands[i].shift_kind == SHIFT_RRX)
6228 inst.instruction |= SHIFT_ROR << 5;
6229 else
6230 {
6231 inst.instruction |= inst.operands[i].shift_kind << 5;
6232 if (inst.operands[i].immisreg)
6233 {
6234 inst.instruction |= SHIFT_BY_REG;
6235 inst.instruction |= inst.operands[i].imm << 8;
6236 }
6237 else
6238 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6239 }
6240 }
6241
6242 static void
6243 encode_arm_shifter_operand (int i)
6244 {
6245 if (inst.operands[i].isreg)
6246 {
6247 inst.instruction |= inst.operands[i].reg;
6248 encode_arm_shift (i);
6249 }
6250 else
6251 inst.instruction |= INST_IMMEDIATE;
6252 }
6253
6254 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6255 static void
6256 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6257 {
6258 assert (inst.operands[i].isreg);
6259 inst.instruction |= inst.operands[i].reg << 16;
6260
6261 if (inst.operands[i].preind)
6262 {
6263 if (is_t)
6264 {
6265 inst.error = _("instruction does not accept preindexed addressing");
6266 return;
6267 }
6268 inst.instruction |= PRE_INDEX;
6269 if (inst.operands[i].writeback)
6270 inst.instruction |= WRITE_BACK;
6271
6272 }
6273 else if (inst.operands[i].postind)
6274 {
6275 assert (inst.operands[i].writeback);
6276 if (is_t)
6277 inst.instruction |= WRITE_BACK;
6278 }
6279 else /* unindexed - only for coprocessor */
6280 {
6281 inst.error = _("instruction does not accept unindexed addressing");
6282 return;
6283 }
6284
6285 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6286 && (((inst.instruction & 0x000f0000) >> 16)
6287 == ((inst.instruction & 0x0000f000) >> 12)))
6288 as_warn ((inst.instruction & LOAD_BIT)
6289 ? _("destination register same as write-back base")
6290 : _("source register same as write-back base"));
6291 }
6292
6293 /* inst.operands[i] was set up by parse_address. Encode it into an
6294 ARM-format mode 2 load or store instruction. If is_t is true,
6295 reject forms that cannot be used with a T instruction (i.e. not
6296 post-indexed). */
6297 static void
6298 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6299 {
6300 encode_arm_addr_mode_common (i, is_t);
6301
6302 if (inst.operands[i].immisreg)
6303 {
6304 inst.instruction |= INST_IMMEDIATE; /* Yes, this is backwards. */
6305 inst.instruction |= inst.operands[i].imm;
6306 if (!inst.operands[i].negative)
6307 inst.instruction |= INDEX_UP;
6308 if (inst.operands[i].shifted)
6309 {
6310 if (inst.operands[i].shift_kind == SHIFT_RRX)
6311 inst.instruction |= SHIFT_ROR << 5;
6312 else
6313 {
6314 inst.instruction |= inst.operands[i].shift_kind << 5;
6315 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6316 }
6317 }
6318 }
6319 else /* immediate offset in inst.reloc */
6320 {
6321 if (inst.reloc.type == BFD_RELOC_UNUSED)
6322 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6323 }
6324 }
6325
6326 /* inst.operands[i] was set up by parse_address. Encode it into an
6327 ARM-format mode 3 load or store instruction. Reject forms that
6328 cannot be used with such instructions. If is_t is true, reject
6329 forms that cannot be used with a T instruction (i.e. not
6330 post-indexed). */
6331 static void
6332 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6333 {
6334 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6335 {
6336 inst.error = _("instruction does not accept scaled register index");
6337 return;
6338 }
6339
6340 encode_arm_addr_mode_common (i, is_t);
6341
6342 if (inst.operands[i].immisreg)
6343 {
6344 inst.instruction |= inst.operands[i].imm;
6345 if (!inst.operands[i].negative)
6346 inst.instruction |= INDEX_UP;
6347 }
6348 else /* immediate offset in inst.reloc */
6349 {
6350 inst.instruction |= HWOFFSET_IMM;
6351 if (inst.reloc.type == BFD_RELOC_UNUSED)
6352 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6353 }
6354 }
6355
6356 /* inst.operands[i] was set up by parse_address. Encode it into an
6357 ARM-format instruction. Reject all forms which cannot be encoded
6358 into a coprocessor load/store instruction. If wb_ok is false,
6359 reject use of writeback; if unind_ok is false, reject use of
6360 unindexed addressing. If reloc_override is not 0, use it instead
6361 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6362 (in which case it is preserved). */
6363
6364 static int
6365 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6366 {
6367 inst.instruction |= inst.operands[i].reg << 16;
6368
6369 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6370
6371 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6372 {
6373 assert (!inst.operands[i].writeback);
6374 if (!unind_ok)
6375 {
6376 inst.error = _("instruction does not support unindexed addressing");
6377 return FAIL;
6378 }
6379 inst.instruction |= inst.operands[i].imm;
6380 inst.instruction |= INDEX_UP;
6381 return SUCCESS;
6382 }
6383
6384 if (inst.operands[i].preind)
6385 inst.instruction |= PRE_INDEX;
6386
6387 if (inst.operands[i].writeback)
6388 {
6389 if (inst.operands[i].reg == REG_PC)
6390 {
6391 inst.error = _("pc may not be used with write-back");
6392 return FAIL;
6393 }
6394 if (!wb_ok)
6395 {
6396 inst.error = _("instruction does not support writeback");
6397 return FAIL;
6398 }
6399 inst.instruction |= WRITE_BACK;
6400 }
6401
6402 if (reloc_override)
6403 inst.reloc.type = reloc_override;
6404 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6405 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6406 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6407 {
6408 if (thumb_mode)
6409 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6410 else
6411 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6412 }
6413
6414 return SUCCESS;
6415 }
6416
6417 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6418 Determine whether it can be performed with a move instruction; if
6419 it can, convert inst.instruction to that move instruction and
6420 return 1; if it can't, convert inst.instruction to a literal-pool
6421 load and return 0. If this is not a valid thing to do in the
6422 current context, set inst.error and return 1.
6423
6424 inst.operands[i] describes the destination register. */
6425
6426 static int
6427 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6428 {
6429 unsigned long tbit;
6430
6431 if (thumb_p)
6432 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6433 else
6434 tbit = LOAD_BIT;
6435
6436 if ((inst.instruction & tbit) == 0)
6437 {
6438 inst.error = _("invalid pseudo operation");
6439 return 1;
6440 }
6441 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6442 {
6443 inst.error = _("constant expression expected");
6444 return 1;
6445 }
6446 if (inst.reloc.exp.X_op == O_constant)
6447 {
6448 if (thumb_p)
6449 {
6450 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6451 {
6452 /* This can be done with a mov(1) instruction. */
6453 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6454 inst.instruction |= inst.reloc.exp.X_add_number;
6455 return 1;
6456 }
6457 }
6458 else
6459 {
6460 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6461 if (value != FAIL)
6462 {
6463 /* This can be done with a mov instruction. */
6464 inst.instruction &= LITERAL_MASK;
6465 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6466 inst.instruction |= value & 0xfff;
6467 return 1;
6468 }
6469
6470 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6471 if (value != FAIL)
6472 {
6473 /* This can be done with a mvn instruction. */
6474 inst.instruction &= LITERAL_MASK;
6475 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6476 inst.instruction |= value & 0xfff;
6477 return 1;
6478 }
6479 }
6480 }
6481
6482 if (add_to_lit_pool () == FAIL)
6483 {
6484 inst.error = _("literal pool insertion failed");
6485 return 1;
6486 }
6487 inst.operands[1].reg = REG_PC;
6488 inst.operands[1].isreg = 1;
6489 inst.operands[1].preind = 1;
6490 inst.reloc.pc_rel = 1;
6491 inst.reloc.type = (thumb_p
6492 ? BFD_RELOC_ARM_THUMB_OFFSET
6493 : (mode_3
6494 ? BFD_RELOC_ARM_HWLITERAL
6495 : BFD_RELOC_ARM_LITERAL));
6496 return 0;
6497 }
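
/* For illustration: "ldr r0, =0xff000000" is converted to a mov,
   since the constant is a valid ARM immediate, whereas
   "ldr r0, =0x12345678" is not encodable either directly or
   inverted, so the constant goes to the literal pool and the
   instruction becomes a pc-relative load. */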
6498
6499 /* Functions for instruction encoding, sorted by subarchitecture.
6500 First some generics; their names are taken from the conventional
6501 bit positions for register arguments in ARM format instructions. */
6502
6503 static void
6504 do_noargs (void)
6505 {
6506 }
6507
6508 static void
6509 do_rd (void)
6510 {
6511 inst.instruction |= inst.operands[0].reg << 12;
6512 }
6513
6514 static void
6515 do_rd_rm (void)
6516 {
6517 inst.instruction |= inst.operands[0].reg << 12;
6518 inst.instruction |= inst.operands[1].reg;
6519 }
6520
6521 static void
6522 do_rd_rn (void)
6523 {
6524 inst.instruction |= inst.operands[0].reg << 12;
6525 inst.instruction |= inst.operands[1].reg << 16;
6526 }
6527
6528 static void
6529 do_rn_rd (void)
6530 {
6531 inst.instruction |= inst.operands[0].reg << 16;
6532 inst.instruction |= inst.operands[1].reg << 12;
6533 }
6534
6535 static void
6536 do_rd_rm_rn (void)
6537 {
6538 unsigned Rn = inst.operands[2].reg;
6539 /* Enforce restrictions on SWP instruction. */
6540 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6541 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6542 _("Rn must not overlap other operands"));
6543 inst.instruction |= inst.operands[0].reg << 12;
6544 inst.instruction |= inst.operands[1].reg;
6545 inst.instruction |= Rn << 16;
6546 }
6547
6548 static void
6549 do_rd_rn_rm (void)
6550 {
6551 inst.instruction |= inst.operands[0].reg << 12;
6552 inst.instruction |= inst.operands[1].reg << 16;
6553 inst.instruction |= inst.operands[2].reg;
6554 }
6555
6556 static void
6557 do_rm_rd_rn (void)
6558 {
6559 inst.instruction |= inst.operands[0].reg;
6560 inst.instruction |= inst.operands[1].reg << 12;
6561 inst.instruction |= inst.operands[2].reg << 16;
6562 }
6563
6564 static void
6565 do_imm0 (void)
6566 {
6567 inst.instruction |= inst.operands[0].imm;
6568 }
6569
6570 static void
6571 do_rd_cpaddr (void)
6572 {
6573 inst.instruction |= inst.operands[0].reg << 12;
6574 encode_arm_cp_address (1, TRUE, TRUE, 0);
6575 }
6576
6577 /* ARM instructions, in alphabetical order by function name (except
6578 that wrapper functions appear immediately after the function they
6579 wrap). */
6580
6581 /* This is a pseudo-op of the form "adr rd, label" to be converted
6582 into a relative address of the form "add rd, pc, #label-.-8". */
6583
6584 static void
6585 do_adr (void)
6586 {
6587 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6588
6589 /* Frag hacking will turn this into a sub instruction if the offset turns
6590 out to be negative. */
6591 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6592 inst.reloc.pc_rel = 1;
6593 inst.reloc.exp.X_add_number -= 8;
6594 }
6595
6596 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6597 into a relative address of the form:
6598 add rd, pc, #low(label-.-8)
6599 add rd, rd, #high(label-.-8)  */
6600
6601 static void
6602 do_adrl (void)
6603 {
6604 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6605
6606 /* Frag hacking will turn this into a sub instruction if the offset turns
6607 out to be negative. */
6608 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6609 inst.reloc.pc_rel = 1;
6610 inst.size = INSN_SIZE * 2;
6611 inst.reloc.exp.X_add_number -= 8;
6612 }
6613
6614 static void
6615 do_arit (void)
6616 {
6617 if (!inst.operands[1].present)
6618 inst.operands[1].reg = inst.operands[0].reg;
6619 inst.instruction |= inst.operands[0].reg << 12;
6620 inst.instruction |= inst.operands[1].reg << 16;
6621 encode_arm_shifter_operand (2);
6622 }
6623
6624 static void
6625 do_barrier (void)
6626 {
6627 if (inst.operands[0].present)
6628 {
6629 constraint ((inst.instruction & 0xf0) != 0x40
6630 && inst.operands[0].imm != 0xf,
6631 "bad barrier type");
6632 inst.instruction |= inst.operands[0].imm;
6633 }
6634 else
6635 inst.instruction |= 0xf;
6636 }
6637
6638 static void
6639 do_bfc (void)
6640 {
6641 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6642 constraint (msb > 32, _("bit-field extends past end of register"));
6643 /* The instruction encoding stores the LSB and MSB,
6644 not the LSB and width. */
6645 inst.instruction |= inst.operands[0].reg << 12;
6646 inst.instruction |= inst.operands[1].imm << 7;
6647 inst.instruction |= (msb - 1) << 16;
6648 }
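
/* For illustration: "bfc r0, #8, #4" has lsb 8 and width 4, so the
   encoding carries lsb 8 in bits 11:7 and msb 11 (8 + 4 - 1) in
   bits 20:16. */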
6649
6650 static void
6651 do_bfi (void)
6652 {
6653 unsigned int msb;
6654
6655 /* #0 in second position is alternative syntax for bfc, which is
6656 the same instruction but with REG_PC in the Rm field. */
6657 if (!inst.operands[1].isreg)
6658 inst.operands[1].reg = REG_PC;
6659
6660 msb = inst.operands[2].imm + inst.operands[3].imm;
6661 constraint (msb > 32, _("bit-field extends past end of register"));
6662 /* The instruction encoding stores the LSB and MSB,
6663 not the LSB and width. */
6664 inst.instruction |= inst.operands[0].reg << 12;
6665 inst.instruction |= inst.operands[1].reg;
6666 inst.instruction |= inst.operands[2].imm << 7;
6667 inst.instruction |= (msb - 1) << 16;
6668 }
6669
6670 static void
6671 do_bfx (void)
6672 {
6673 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6674 _("bit-field extends past end of register"));
6675 inst.instruction |= inst.operands[0].reg << 12;
6676 inst.instruction |= inst.operands[1].reg;
6677 inst.instruction |= inst.operands[2].imm << 7;
6678 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6679 }
6680
6681 /* ARM V5 breakpoint instruction (argument parse)
6682 BKPT <16 bit unsigned immediate>
6683 Instruction is not conditional.
6684 The bit pattern given in insns[] has the COND_ALWAYS condition,
6685 and it is an error if the caller tried to override that. */
6686
6687 static void
6688 do_bkpt (void)
6689 {
6690 /* Top 12 of 16 bits to bits 19:8. */
6691 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6692
6693 /* Bottom 4 of 16 bits to bits 3:0. */
6694 inst.instruction |= inst.operands[0].imm & 0xf;
6695 }
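
/* For illustration: "bkpt 0x1234" puts 0x123 in bits 19:8 and 0x4 in
   bits 3:0 of the instruction. */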
6696
6697 static void
6698 encode_branch (int default_reloc)
6699 {
6700 if (inst.operands[0].hasreloc)
6701 {
6702 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6703 _("the only suffix valid here is '(plt)'"));
6704 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6705 }
6706 else
6707 {
6708 inst.reloc.type = default_reloc;
6709 }
6710 inst.reloc.pc_rel = 1;
6711 }
6712
6713 static void
6714 do_branch (void)
6715 {
6716 #ifdef OBJ_ELF
6717 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6718 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6719 else
6720 #endif
6721 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6722 }
6723
6724 static void
6725 do_bl (void)
6726 {
6727 #ifdef OBJ_ELF
6728 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6729 {
6730 if (inst.cond == COND_ALWAYS)
6731 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6732 else
6733 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6734 }
6735 else
6736 #endif
6737 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6738 }
6739
6740 /* ARM V5 branch-link-exchange instruction (argument parse)
6741 BLX <target_addr> ie BLX(1)
6742 BLX{<condition>} <Rm> ie BLX(2)
6743 Unfortunately, there are two different opcodes for this mnemonic.
6744 So, the insns[].value is not used, and the code here zaps values
6745 into inst.instruction.
6746 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6747
6748 static void
6749 do_blx (void)
6750 {
6751 if (inst.operands[0].isreg)
6752 {
6753 /* Arg is a register; the opcode provided by insns[] is correct.
6754 It is not illegal to do "blx pc", just useless. */
6755 if (inst.operands[0].reg == REG_PC)
6756 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6757
6758 inst.instruction |= inst.operands[0].reg;
6759 }
6760 else
6761 {
6762 /* Arg is an address; this instruction cannot be executed
6763 conditionally, and the opcode must be adjusted. */
6764 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6765 inst.instruction = 0xfa000000;
6766 #ifdef OBJ_ELF
6767 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6768 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6769 else
6770 #endif
6771 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6772 }
6773 }
6774
6775 static void
6776 do_bx (void)
6777 {
6778 if (inst.operands[0].reg == REG_PC)
6779 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6780
6781 inst.instruction |= inst.operands[0].reg;
6782 }
6783
6784
6785 /* ARM v5TEJ. Jump to Jazelle code. */
6786
6787 static void
6788 do_bxj (void)
6789 {
6790 if (inst.operands[0].reg == REG_PC)
6791 as_tsktsk (_("use of r15 in bxj is not really useful"));
6792
6793 inst.instruction |= inst.operands[0].reg;
6794 }
6795
6796 /* Co-processor data operation:
6797 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6798 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6799 static void
6800 do_cdp (void)
6801 {
6802 inst.instruction |= inst.operands[0].reg << 8;
6803 inst.instruction |= inst.operands[1].imm << 20;
6804 inst.instruction |= inst.operands[2].reg << 12;
6805 inst.instruction |= inst.operands[3].reg << 16;
6806 inst.instruction |= inst.operands[4].reg;
6807 inst.instruction |= inst.operands[5].imm << 5;
6808 }
6809
6810 static void
6811 do_cmp (void)
6812 {
6813 inst.instruction |= inst.operands[0].reg << 16;
6814 encode_arm_shifter_operand (1);
6815 }
6816
6817 /* Transfer between coprocessor and ARM registers.
6818 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6819 MRC2
6820 MCR{cond}
6821 MCR2
6822
6823 No special properties. */
6824
6825 static void
6826 do_co_reg (void)
6827 {
6828 inst.instruction |= inst.operands[0].reg << 8;
6829 inst.instruction |= inst.operands[1].imm << 21;
6830 inst.instruction |= inst.operands[2].reg << 12;
6831 inst.instruction |= inst.operands[3].reg << 16;
6832 inst.instruction |= inst.operands[4].reg;
6833 inst.instruction |= inst.operands[5].imm << 5;
6834 }
6835
6836 /* Transfer between coprocessor register and pair of ARM registers.
6837 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6838 MCRR2
6839 MRRC{cond}
6840 MRRC2
6841
6842 Two XScale instructions are special cases of these:
6843
6844 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6845 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6846
6847 Result unpredictable if Rd or Rn is R15. */
6848
6849 static void
6850 do_co_reg2c (void)
6851 {
6852 inst.instruction |= inst.operands[0].reg << 8;
6853 inst.instruction |= inst.operands[1].imm << 4;
6854 inst.instruction |= inst.operands[2].reg << 12;
6855 inst.instruction |= inst.operands[3].reg << 16;
6856 inst.instruction |= inst.operands[4].reg;
6857 }
6858
6859 static void
6860 do_cpsi (void)
6861 {
6862 inst.instruction |= inst.operands[0].imm << 6;
6863 if (inst.operands[1].present)
6864 {
6865 inst.instruction |= CPSI_MMOD;
6866 inst.instruction |= inst.operands[1].imm;
6867 }
6868 }
6869
6870 static void
6871 do_dbg (void)
6872 {
6873 inst.instruction |= inst.operands[0].imm;
6874 }
6875
6876 static void
6877 do_it (void)
6878 {
6879 /* There is no IT instruction in ARM mode. We
6880 process it but do not generate code for it. */
6881 inst.size = 0;
6882 }
6883
6884 static void
6885 do_ldmstm (void)
6886 {
6887 int base_reg = inst.operands[0].reg;
6888 int range = inst.operands[1].imm;
6889
6890 inst.instruction |= base_reg << 16;
6891 inst.instruction |= range;
6892
6893 if (inst.operands[1].writeback)
6894 inst.instruction |= LDM_TYPE_2_OR_3;
6895
6896 if (inst.operands[0].writeback)
6897 {
6898 inst.instruction |= WRITE_BACK;
6899 /* Check for unpredictable uses of writeback. */
6900 if (inst.instruction & LOAD_BIT)
6901 {
6902 /* Not allowed in LDM type 2. */
6903 if ((inst.instruction & LDM_TYPE_2_OR_3)
6904 && ((range & (1 << REG_PC)) == 0))
6905 as_warn (_("writeback of base register is UNPREDICTABLE"));
6906 /* Only allowed if base reg not in list for other types. */
6907 else if (range & (1 << base_reg))
6908 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6909 }
6910 else /* STM. */
6911 {
6912 /* Not allowed for type 2. */
6913 if (inst.instruction & LDM_TYPE_2_OR_3)
6914 as_warn (_("writeback of base register is UNPREDICTABLE"));
6915 /* Only allowed if base reg not in list, or first in list. */
6916 else if ((range & (1 << base_reg))
6917 && (range & ((1 << base_reg) - 1)))
6918 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6919 }
6920 }
6921 }
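
/* For illustration, the writeback checks above warn for
   "ldmia r0!, {r0, r1}" (base register also loaded) and for
   "stmia r1!, {r0, r1}" (base stored but not the lowest register in
   the list), while "stmia r1!, {r1, r2}" is accepted silently. */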
6922
6923 /* ARMv5TE load-consecutive (argument parse)
6924 Mode is like LDRH.
6925
6926 LDRccD R, mode
6927 STRccD R, mode. */
6928
6929 static void
6930 do_ldrd (void)
6931 {
6932 constraint (inst.operands[0].reg % 2 != 0,
6933 _("first destination register must be even"));
6934 constraint (inst.operands[1].present
6935 && inst.operands[1].reg != inst.operands[0].reg + 1,
6936 _("can only load two consecutive registers"));
6937 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6938 constraint (!inst.operands[2].isreg, _("'[' expected"));
6939
6940 if (!inst.operands[1].present)
6941 inst.operands[1].reg = inst.operands[0].reg + 1;
6942
6943 if (inst.instruction & LOAD_BIT)
6944 {
6945 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6946 register and the first register written; we have to diagnose
6947 overlap between the base and the second register written here. */
6948
6949 if (inst.operands[2].reg == inst.operands[1].reg
6950 && (inst.operands[2].writeback || inst.operands[2].postind))
6951 as_warn (_("base register written back, and overlaps "
6952 "second destination register"));
6953
6954 /* For an index-register load, the index register must not overlap the
6955 destination (even if not write-back). */
6956 else if (inst.operands[2].immisreg
6957 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6958 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6959 as_warn (_("index register overlaps destination register"));
6960 }
6961
6962 inst.instruction |= inst.operands[0].reg << 12;
6963 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6964 }
6965
6966 static void
6967 do_ldrex (void)
6968 {
6969 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6970 || inst.operands[1].postind || inst.operands[1].writeback
6971 || inst.operands[1].immisreg || inst.operands[1].shifted
6972 || inst.operands[1].negative
6973 /* This can arise if the programmer has written
6974 strex rN, rM, foo
6975 or if they have mistakenly used a register name as the last
6976 operand, eg:
6977 strex rN, rM, rX
6978 It is very difficult to distinguish between these two cases
6979 because "rX" might actually be a label. ie the register
6980 name has been occluded by a symbol of the same name. So we
6981 just generate a general 'bad addressing mode' type error
6982 message and leave it up to the programmer to discover the
6983 true cause and fix their mistake. */
6984 || (inst.operands[1].reg == REG_PC),
6985 BAD_ADDR_MODE);
6986
6987 constraint (inst.reloc.exp.X_op != O_constant
6988 || inst.reloc.exp.X_add_number != 0,
6989 _("offset must be zero in ARM encoding"));
6990
6991 inst.instruction |= inst.operands[0].reg << 12;
6992 inst.instruction |= inst.operands[1].reg << 16;
6993 inst.reloc.type = BFD_RELOC_UNUSED;
6994 }
6995
6996 static void
6997 do_ldrexd (void)
6998 {
6999 constraint (inst.operands[0].reg % 2 != 0,
7000 _("even register required"));
7001 constraint (inst.operands[1].present
7002 && inst.operands[1].reg != inst.operands[0].reg + 1,
7003 _("can only load two consecutive registers"));
7004 /* If op 1 were present and equal to PC, this function wouldn't
7005 have been called in the first place. */
7006 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7007
7008 inst.instruction |= inst.operands[0].reg << 12;
7009 inst.instruction |= inst.operands[2].reg << 16;
7010 }
7011
7012 static void
7013 do_ldst (void)
7014 {
7015 inst.instruction |= inst.operands[0].reg << 12;
7016 if (!inst.operands[1].isreg)
7017 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7018 return;
7019 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7020 }
7021
7022 static void
7023 do_ldstt (void)
7024 {
7025 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7026 reject [Rn,...]. */
7027 if (inst.operands[1].preind)
7028 {
7029 constraint (inst.reloc.exp.X_op != O_constant ||
7030 inst.reloc.exp.X_add_number != 0,
7031 _("this instruction requires a post-indexed address"));
7032
7033 inst.operands[1].preind = 0;
7034 inst.operands[1].postind = 1;
7035 inst.operands[1].writeback = 1;
7036 }
7037 inst.instruction |= inst.operands[0].reg << 12;
7038 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7039 }
7040
7041 /* Halfword and signed-byte load/store operations. */
7042
7043 static void
7044 do_ldstv4 (void)
7045 {
7046 inst.instruction |= inst.operands[0].reg << 12;
7047 if (!inst.operands[1].isreg)
7048 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7049 return;
7050 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7051 }
7052
7053 static void
7054 do_ldsttv4 (void)
7055 {
7056 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7057 reject [Rn,...]. */
7058 if (inst.operands[1].preind)
7059 {
7060 constraint (inst.reloc.exp.X_op != O_constant ||
7061 inst.reloc.exp.X_add_number != 0,
7062 _("this instruction requires a post-indexed address"));
7063
7064 inst.operands[1].preind = 0;
7065 inst.operands[1].postind = 1;
7066 inst.operands[1].writeback = 1;
7067 }
7068 inst.instruction |= inst.operands[0].reg << 12;
7069 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7070 }
7071
7072 /* Co-processor register load/store.
7073 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7074 static void
7075 do_lstc (void)
7076 {
7077 inst.instruction |= inst.operands[0].reg << 8;
7078 inst.instruction |= inst.operands[1].reg << 12;
7079 encode_arm_cp_address (2, TRUE, TRUE, 0);
7080 }
7081
7082 static void
7083 do_mlas (void)
7084 {
7085 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7086 if (inst.operands[0].reg == inst.operands[1].reg
7087 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7088 && !(inst.instruction & 0x00400000))
7089 as_tsktsk (_("Rd and Rm should be different in mla"));
7090
7091 inst.instruction |= inst.operands[0].reg << 16;
7092 inst.instruction |= inst.operands[1].reg;
7093 inst.instruction |= inst.operands[2].reg << 8;
7094 inst.instruction |= inst.operands[3].reg << 12;
7095 }
7096
7097 static void
7098 do_mov (void)
7099 {
7100 inst.instruction |= inst.operands[0].reg << 12;
7101 encode_arm_shifter_operand (1);
7102 }
7103
7104 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7105 static void
7106 do_mov16 (void)
7107 {
7108 bfd_vma imm;
7109 bfd_boolean top;
7110
7111 top = (inst.instruction & 0x00400000) != 0;
7112 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7113 _(":lower16: not allowed this instruction"));
7114 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7115 _(":upper16: not allowed instruction"));
7116 inst.instruction |= inst.operands[0].reg << 12;
7117 if (inst.reloc.type == BFD_RELOC_UNUSED)
7118 {
7119 imm = inst.reloc.exp.X_add_number;
7120 /* The value is in two pieces: 0:11, 16:19. */
7121 inst.instruction |= (imm & 0x00000fff);
7122 inst.instruction |= (imm & 0x0000f000) << 4;
7123 }
7124 }
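
/* For illustration: "movw r0, #0x1234" places 0x234 in bits 11:0 and
   0x1 in bits 19:16; if a :lower16: or :upper16: relocation is
   pending instead, the immediate field is left for the fixup to
   fill in. */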
7125
7126 static void do_vfp_nsyn_opcode (const char *);
7127
7128 static int
7129 do_vfp_nsyn_mrs (void)
7130 {
7131 if (inst.operands[0].isvec)
7132 {
7133 if (inst.operands[1].reg != 1)
7134 first_error (_("operand 1 must be FPSCR"));
7135 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7136 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7137 do_vfp_nsyn_opcode ("fmstat");
7138 }
7139 else if (inst.operands[1].isvec)
7140 do_vfp_nsyn_opcode ("fmrx");
7141 else
7142 return FAIL;
7143
7144 return SUCCESS;
7145 }
7146
7147 static int
7148 do_vfp_nsyn_msr (void)
7149 {
7150 if (inst.operands[0].isvec)
7151 do_vfp_nsyn_opcode ("fmxr");
7152 else
7153 return FAIL;
7154
7155 return SUCCESS;
7156 }
7157
7158 static void
7159 do_mrs (void)
7160 {
7161 if (do_vfp_nsyn_mrs () == SUCCESS)
7162 return;
7163
7164 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7165 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7166 != (PSR_c|PSR_f),
7167 _("'CPSR' or 'SPSR' expected"));
7168 inst.instruction |= inst.operands[0].reg << 12;
7169 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7170 }
7171
7172 /* Two possible forms:
7173 "{C|S}PSR_<field>, Rm",
7174 "{C|S}PSR_f, #expression". */
7175
7176 static void
7177 do_msr (void)
7178 {
7179 if (do_vfp_nsyn_msr () == SUCCESS)
7180 return;
7181
7182 inst.instruction |= inst.operands[0].imm;
7183 if (inst.operands[1].isreg)
7184 inst.instruction |= inst.operands[1].reg;
7185 else
7186 {
7187 inst.instruction |= INST_IMMEDIATE;
7188 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7189 inst.reloc.pc_rel = 0;
7190 }
7191 }
7192
7193 static void
7194 do_mul (void)
7195 {
7196 if (!inst.operands[2].present)
7197 inst.operands[2].reg = inst.operands[0].reg;
7198 inst.instruction |= inst.operands[0].reg << 16;
7199 inst.instruction |= inst.operands[1].reg;
7200 inst.instruction |= inst.operands[2].reg << 8;
7201
7202 if (inst.operands[0].reg == inst.operands[1].reg
7203 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7204 as_tsktsk (_("Rd and Rm should be different in mul"));
7205 }
7206
7207 /* Long Multiply Parser
7208 UMULL RdLo, RdHi, Rm, Rs
7209 SMULL RdLo, RdHi, Rm, Rs
7210 UMLAL RdLo, RdHi, Rm, Rs
7211 SMLAL RdLo, RdHi, Rm, Rs. */
7212
7213 static void
7214 do_mull (void)
7215 {
7216 inst.instruction |= inst.operands[0].reg << 12;
7217 inst.instruction |= inst.operands[1].reg << 16;
7218 inst.instruction |= inst.operands[2].reg;
7219 inst.instruction |= inst.operands[3].reg << 8;
7220
7221 /* rdhi, rdlo and rm must all be different. */
7222 if (inst.operands[0].reg == inst.operands[1].reg
7223 || inst.operands[0].reg == inst.operands[2].reg
7224 || inst.operands[1].reg == inst.operands[2].reg)
7225 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7226 }
7227
7228 static void
7229 do_nop (void)
7230 {
7231 if (inst.operands[0].present)
7232 {
7233 /* Architectural NOP hints are CPSR sets with no bits selected. */
7234 inst.instruction &= 0xf0000000;
7235 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7236 }
7237 }
7238
7239 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7240 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7241 Condition defaults to COND_ALWAYS.
7242 Error if Rd, Rn or Rm are R15. */
7243
7244 static void
7245 do_pkhbt (void)
7246 {
7247 inst.instruction |= inst.operands[0].reg << 12;
7248 inst.instruction |= inst.operands[1].reg << 16;
7249 inst.instruction |= inst.operands[2].reg;
7250 if (inst.operands[3].present)
7251 encode_arm_shift (3);
7252 }
7253
7254 /* ARM V6 PKHTB (Argument Parse). */
7255
7256 static void
7257 do_pkhtb (void)
7258 {
7259 if (!inst.operands[3].present)
7260 {
7261 /* If the shift specifier is omitted, turn the instruction
7262 into pkhbt rd, rm, rn. */
7263 inst.instruction &= 0xfff00010;
7264 inst.instruction |= inst.operands[0].reg << 12;
7265 inst.instruction |= inst.operands[1].reg;
7266 inst.instruction |= inst.operands[2].reg << 16;
7267 }
7268 else
7269 {
7270 inst.instruction |= inst.operands[0].reg << 12;
7271 inst.instruction |= inst.operands[1].reg << 16;
7272 inst.instruction |= inst.operands[2].reg;
7273 encode_arm_shift (3);
7274 }
7275 }
7276
7277 /* ARMv5TE: Preload-Cache
7278
7279 PLD <addr_mode>
7280
7281 Syntactically, like LDR with B=1, W=0, L=1. */
7282
7283 static void
7284 do_pld (void)
7285 {
7286 constraint (!inst.operands[0].isreg,
7287 _("'[' expected after PLD mnemonic"));
7288 constraint (inst.operands[0].postind,
7289 _("post-indexed expression used in preload instruction"));
7290 constraint (inst.operands[0].writeback,
7291 _("writeback used in preload instruction"));
7292 constraint (!inst.operands[0].preind,
7293 _("unindexed addressing used in preload instruction"));
7294 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7295 }
7296
7297 /* ARMv7: PLI <addr_mode> */
7298 static void
7299 do_pli (void)
7300 {
7301 constraint (!inst.operands[0].isreg,
7302 _("'[' expected after PLI mnemonic"));
7303 constraint (inst.operands[0].postind,
7304 _("post-indexed expression used in preload instruction"));
7305 constraint (inst.operands[0].writeback,
7306 _("writeback used in preload instruction"));
7307 constraint (!inst.operands[0].preind,
7308 _("unindexed addressing used in preload instruction"));
7309 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7310 inst.instruction &= ~PRE_INDEX;
7311 }
7312
7313 static void
7314 do_push_pop (void)
7315 {
7316 inst.operands[1] = inst.operands[0];
7317 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7318 inst.operands[0].isreg = 1;
7319 inst.operands[0].writeback = 1;
7320 inst.operands[0].reg = REG_SP;
7321 do_ldmstm ();
7322 }
7323
7324 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7325 word at the specified address and the following word
7326 respectively.
7327 Unconditionally executed.
7328 Error if Rn is R15. */
7329
7330 static void
7331 do_rfe (void)
7332 {
7333 inst.instruction |= inst.operands[0].reg << 16;
7334 if (inst.operands[0].writeback)
7335 inst.instruction |= WRITE_BACK;
7336 }
7337
7338 /* ARM V6 ssat (argument parse). */
7339
7340 static void
7341 do_ssat (void)
7342 {
7343 inst.instruction |= inst.operands[0].reg << 12;
7344 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7345 inst.instruction |= inst.operands[2].reg;
7346
7347 if (inst.operands[3].present)
7348 encode_arm_shift (3);
7349 }
7350
7351 /* ARM V6 usat (argument parse). */
7352
7353 static void
7354 do_usat (void)
7355 {
7356 inst.instruction |= inst.operands[0].reg << 12;
7357 inst.instruction |= inst.operands[1].imm << 16;
7358 inst.instruction |= inst.operands[2].reg;
7359
7360 if (inst.operands[3].present)
7361 encode_arm_shift (3);
7362 }
7363
7364 /* ARM V6 ssat16 (argument parse). */
7365
7366 static void
7367 do_ssat16 (void)
7368 {
7369 inst.instruction |= inst.operands[0].reg << 12;
7370 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7371 inst.instruction |= inst.operands[2].reg;
7372 }
7373
7374 static void
7375 do_usat16 (void)
7376 {
7377 inst.instruction |= inst.operands[0].reg << 12;
7378 inst.instruction |= inst.operands[1].imm << 16;
7379 inst.instruction |= inst.operands[2].reg;
7380 }
7381
7382 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7383 preserving the other bits.
7384
7385 setend <endian_specifier>, where <endian_specifier> is either
7386 BE or LE. */
7387
7388 static void
7389 do_setend (void)
7390 {
7391 if (inst.operands[0].imm)
7392 inst.instruction |= 0x200;
7393 }
7394
7395 static void
7396 do_shift (void)
7397 {
7398 unsigned int Rm = (inst.operands[1].present
7399 ? inst.operands[1].reg
7400 : inst.operands[0].reg);
7401
7402 inst.instruction |= inst.operands[0].reg << 12;
7403 inst.instruction |= Rm;
7404 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7405 {
7406 inst.instruction |= inst.operands[2].reg << 8;
7407 inst.instruction |= SHIFT_BY_REG;
7408 }
7409 else
7410 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7411 }
7412
7413 static void
7414 do_smc (void)
7415 {
7416 inst.reloc.type = BFD_RELOC_ARM_SMC;
7417 inst.reloc.pc_rel = 0;
7418 }
7419
7420 static void
7421 do_swi (void)
7422 {
7423 inst.reloc.type = BFD_RELOC_ARM_SWI;
7424 inst.reloc.pc_rel = 0;
7425 }
7426
7427 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7428 SMLAxy{cond} Rd,Rm,Rs,Rn
7429 SMLAWy{cond} Rd,Rm,Rs,Rn
7430 Error if any register is R15. */
7431
7432 static void
7433 do_smla (void)
7434 {
7435 inst.instruction |= inst.operands[0].reg << 16;
7436 inst.instruction |= inst.operands[1].reg;
7437 inst.instruction |= inst.operands[2].reg << 8;
7438 inst.instruction |= inst.operands[3].reg << 12;
7439 }
7440
7441 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7442 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7443 Error if any register is R15.
7444 Warning if Rdlo == Rdhi. */
7445
7446 static void
7447 do_smlal (void)
7448 {
7449 inst.instruction |= inst.operands[0].reg << 12;
7450 inst.instruction |= inst.operands[1].reg << 16;
7451 inst.instruction |= inst.operands[2].reg;
7452 inst.instruction |= inst.operands[3].reg << 8;
7453
7454 if (inst.operands[0].reg == inst.operands[1].reg)
7455 as_tsktsk (_("rdhi and rdlo must be different"));
7456 }
7457
7458 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7459 SMULxy{cond} Rd,Rm,Rs
7460 Error if any register is R15. */
7461
7462 static void
7463 do_smul (void)
7464 {
7465 inst.instruction |= inst.operands[0].reg << 16;
7466 inst.instruction |= inst.operands[1].reg;
7467 inst.instruction |= inst.operands[2].reg << 8;
7468 }
7469
7470 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7471 the same for both ARM and Thumb-2. */
7472
7473 static void
7474 do_srs (void)
7475 {
7476 int reg;
7477
7478 if (inst.operands[0].present)
7479 {
7480 reg = inst.operands[0].reg;
7481 constraint (reg != 13, _("SRS base register must be r13"));
7482 }
7483 else
7484 reg = 13;
7485
7486 inst.instruction |= reg << 16;
7487 inst.instruction |= inst.operands[1].imm;
7488 if (inst.operands[0].writeback || inst.operands[1].writeback)
7489 inst.instruction |= WRITE_BACK;
7490 }
7491
7492 /* ARM V6 strex (argument parse). */
7493
7494 static void
7495 do_strex (void)
7496 {
7497 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7498 || inst.operands[2].postind || inst.operands[2].writeback
7499 || inst.operands[2].immisreg || inst.operands[2].shifted
7500 || inst.operands[2].negative
7501 /* See comment in do_ldrex(). */
7502 || (inst.operands[2].reg == REG_PC),
7503 BAD_ADDR_MODE);
7504
7505 constraint (inst.operands[0].reg == inst.operands[1].reg
7506 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7507
7508 constraint (inst.reloc.exp.X_op != O_constant
7509 || inst.reloc.exp.X_add_number != 0,
7510 _("offset must be zero in ARM encoding"));
7511
7512 inst.instruction |= inst.operands[0].reg << 12;
7513 inst.instruction |= inst.operands[1].reg;
7514 inst.instruction |= inst.operands[2].reg << 16;
7515 inst.reloc.type = BFD_RELOC_UNUSED;
7516 }
7517
7518 static void
7519 do_strexd (void)
7520 {
7521 constraint (inst.operands[1].reg % 2 != 0,
7522 _("even register required"));
7523 constraint (inst.operands[2].present
7524 && inst.operands[2].reg != inst.operands[1].reg + 1,
7525 _("can only store two consecutive registers"));
7526 /* If op 2 were present and equal to PC, this function wouldn't
7527 have been called in the first place. */
7528 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7529
7530 constraint (inst.operands[0].reg == inst.operands[1].reg
7531 || inst.operands[0].reg == inst.operands[1].reg + 1
7532 || inst.operands[0].reg == inst.operands[3].reg,
7533 BAD_OVERLAP);
7534
7535 inst.instruction |= inst.operands[0].reg << 12;
7536 inst.instruction |= inst.operands[1].reg;
7537 inst.instruction |= inst.operands[3].reg << 16;
7538 }
7539
7540 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7541 extends it to 32 bits, and adds the result to a value in another
7542 register. You can specify a rotation by 0, 8, 16, or 24 bits
7543 before extracting the 16-bit value.
7544 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7545 Condition defaults to COND_ALWAYS.
7546 Error if any register uses R15. */
7547
7548 static void
7549 do_sxtah (void)
7550 {
7551 inst.instruction |= inst.operands[0].reg << 12;
7552 inst.instruction |= inst.operands[1].reg << 16;
7553 inst.instruction |= inst.operands[2].reg;
7554 inst.instruction |= inst.operands[3].imm << 10;
7555 }
7556
7557 /* ARM V6 SXTH.
7558
7559 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7560 Condition defaults to COND_ALWAYS.
7561 Error if any register uses R15. */
7562
7563 static void
7564 do_sxth (void)
7565 {
7566 inst.instruction |= inst.operands[0].reg << 12;
7567 inst.instruction |= inst.operands[1].reg;
7568 inst.instruction |= inst.operands[2].imm << 10;
7569 }
7570 \f
7571 /* VFP instructions. In a logical order: SP variant first, monad
7572 before dyad, arithmetic then move then load/store. */
7573
7574 static void
7575 do_vfp_sp_monadic (void)
7576 {
7577 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7578 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7579 }
7580
7581 static void
7582 do_vfp_sp_dyadic (void)
7583 {
7584 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7585 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7586 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7587 }
7588
7589 static void
7590 do_vfp_sp_compare_z (void)
7591 {
7592 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7593 }
7594
7595 static void
7596 do_vfp_dp_sp_cvt (void)
7597 {
7598 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7599 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7600 }
7601
7602 static void
7603 do_vfp_sp_dp_cvt (void)
7604 {
7605 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7606 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7607 }
7608
7609 static void
7610 do_vfp_reg_from_sp (void)
7611 {
7612 inst.instruction |= inst.operands[0].reg << 12;
7613 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7614 }
7615
7616 static void
7617 do_vfp_reg2_from_sp2 (void)
7618 {
7619 constraint (inst.operands[2].imm != 2,
7620 _("only two consecutive VFP SP registers allowed here"));
7621 inst.instruction |= inst.operands[0].reg << 12;
7622 inst.instruction |= inst.operands[1].reg << 16;
7623 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7624 }
7625
7626 static void
7627 do_vfp_sp_from_reg (void)
7628 {
7629 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7630 inst.instruction |= inst.operands[1].reg << 12;
7631 }
7632
7633 static void
7634 do_vfp_sp2_from_reg2 (void)
7635 {
7636 constraint (inst.operands[0].imm != 2,
7637 _("only two consecutive VFP SP registers allowed here"));
7638 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7639 inst.instruction |= inst.operands[1].reg << 12;
7640 inst.instruction |= inst.operands[2].reg << 16;
7641 }
7642
7643 static void
7644 do_vfp_sp_ldst (void)
7645 {
7646 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7647 encode_arm_cp_address (1, FALSE, TRUE, 0);
7648 }
7649
7650 static void
7651 do_vfp_dp_ldst (void)
7652 {
7653 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7654 encode_arm_cp_address (1, FALSE, TRUE, 0);
7655 }
7656
7657
7658 static void
7659 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7660 {
7661 if (inst.operands[0].writeback)
7662 inst.instruction |= WRITE_BACK;
7663 else
7664 constraint (ldstm_type != VFP_LDSTMIA,
7665 _("this addressing mode requires base-register writeback"));
7666 inst.instruction |= inst.operands[0].reg << 16;
7667 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7668 inst.instruction |= inst.operands[1].imm;
7669 }
7670
7671 static void
7672 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7673 {
7674 int count;
7675
7676 if (inst.operands[0].writeback)
7677 inst.instruction |= WRITE_BACK;
7678 else
7679 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7680 _("this addressing mode requires base-register writeback"));
7681
7682 inst.instruction |= inst.operands[0].reg << 16;
7683 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7684
7685 count = inst.operands[1].imm << 1;
7686 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7687 count += 1;
7688
7689 inst.instruction |= count;
7690 }
7691
7692 static void
7693 do_vfp_sp_ldstmia (void)
7694 {
7695 vfp_sp_ldstm (VFP_LDSTMIA);
7696 }
7697
7698 static void
7699 do_vfp_sp_ldstmdb (void)
7700 {
7701 vfp_sp_ldstm (VFP_LDSTMDB);
7702 }
7703
7704 static void
7705 do_vfp_dp_ldstmia (void)
7706 {
7707 vfp_dp_ldstm (VFP_LDSTMIA);
7708 }
7709
7710 static void
7711 do_vfp_dp_ldstmdb (void)
7712 {
7713 vfp_dp_ldstm (VFP_LDSTMDB);
7714 }
7715
7716 static void
7717 do_vfp_xp_ldstmia (void)
7718 {
7719 vfp_dp_ldstm (VFP_LDSTMIAX);
7720 }
7721
7722 static void
7723 do_vfp_xp_ldstmdb (void)
7724 {
7725 vfp_dp_ldstm (VFP_LDSTMDBX);
7726 }
7727
7728 static void
7729 do_vfp_dp_rd_rm (void)
7730 {
7731 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7732 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7733 }
7734
7735 static void
7736 do_vfp_dp_rn_rd (void)
7737 {
7738 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7739 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7740 }
7741
7742 static void
7743 do_vfp_dp_rd_rn (void)
7744 {
7745 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7746 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7747 }
7748
7749 static void
7750 do_vfp_dp_rd_rn_rm (void)
7751 {
7752 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7753 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7754 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7755 }
7756
7757 static void
7758 do_vfp_dp_rd (void)
7759 {
7760 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7761 }
7762
7763 static void
7764 do_vfp_dp_rm_rd_rn (void)
7765 {
7766 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7767 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7768 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7769 }
7770
7771 /* VFPv3 instructions. */
7772 static void
7773 do_vfp_sp_const (void)
7774 {
7775 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7776 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7777 inst.instruction |= (inst.operands[1].imm & 0x0f);
7778 }
7779
7780 static void
7781 do_vfp_dp_const (void)
7782 {
7783 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7784 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7785 inst.instruction |= (inst.operands[1].imm & 0x0f);
7786 }
7787
7788 static void
7789 vfp_conv (int srcsize)
7790 {
7791 unsigned immbits = srcsize - inst.operands[1].imm;
7792 inst.instruction |= (immbits & 1) << 5;
7793 inst.instruction |= (immbits >> 1);
7794 }
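
/* For illustration: a conversion with a 32-bit source and 16
   fraction bits gives immbits == 16, encoded as bit 5 clear and
   8 (immbits >> 1) or'd into the low bits of the instruction. */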
7795
7796 static void
7797 do_vfp_sp_conv_16 (void)
7798 {
7799 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7800 vfp_conv (16);
7801 }
7802
7803 static void
7804 do_vfp_dp_conv_16 (void)
7805 {
7806 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7807 vfp_conv (16);
7808 }
7809
7810 static void
7811 do_vfp_sp_conv_32 (void)
7812 {
7813 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7814 vfp_conv (32);
7815 }
7816
7817 static void
7818 do_vfp_dp_conv_32 (void)
7819 {
7820 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7821 vfp_conv (32);
7822 }
7823
7824 \f
7825 /* FPA instructions. Also in a logical order. */
7826
7827 static void
7828 do_fpa_cmp (void)
7829 {
7830 inst.instruction |= inst.operands[0].reg << 16;
7831 inst.instruction |= inst.operands[1].reg;
7832 }
7833
7834 static void
7835 do_fpa_ldmstm (void)
7836 {
7837 inst.instruction |= inst.operands[0].reg << 12;
7838 switch (inst.operands[1].imm)
7839 {
7840 case 1: inst.instruction |= CP_T_X; break;
7841 case 2: inst.instruction |= CP_T_Y; break;
7842 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7843 case 4: break;
7844 default: abort ();
7845 }
7846
7847 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7848 {
7849 /* The instruction specified "ea" or "fd", so we can only accept
7850 [Rn]{!}. The instruction does not really support stacking or
7851 unstacking, so we have to emulate these by setting appropriate
7852 bits and offsets. */
7853 constraint (inst.reloc.exp.X_op != O_constant
7854 || inst.reloc.exp.X_add_number != 0,
7855 _("this instruction does not support indexing"));
7856
7857 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7858 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7859
7860 if (!(inst.instruction & INDEX_UP))
7861 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7862
7863 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7864 {
7865 inst.operands[2].preind = 0;
7866 inst.operands[2].postind = 1;
7867 }
7868 }
7869
7870 encode_arm_cp_address (2, TRUE, TRUE, 0);
7871 }
7872
7873 \f
7874 /* iWMMXt instructions: strictly in alphabetical order. */
7875
7876 static void
7877 do_iwmmxt_tandorc (void)
7878 {
7879 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7880 }
7881
7882 static void
7883 do_iwmmxt_textrc (void)
7884 {
7885 inst.instruction |= inst.operands[0].reg << 12;
7886 inst.instruction |= inst.operands[1].imm;
7887 }
7888
7889 static void
7890 do_iwmmxt_textrm (void)
7891 {
7892 inst.instruction |= inst.operands[0].reg << 12;
7893 inst.instruction |= inst.operands[1].reg << 16;
7894 inst.instruction |= inst.operands[2].imm;
7895 }
7896
7897 static void
7898 do_iwmmxt_tinsr (void)
7899 {
7900 inst.instruction |= inst.operands[0].reg << 16;
7901 inst.instruction |= inst.operands[1].reg << 12;
7902 inst.instruction |= inst.operands[2].imm;
7903 }
7904
7905 static void
7906 do_iwmmxt_tmia (void)
7907 {
7908 inst.instruction |= inst.operands[0].reg << 5;
7909 inst.instruction |= inst.operands[1].reg;
7910 inst.instruction |= inst.operands[2].reg << 12;
7911 }
7912
7913 static void
7914 do_iwmmxt_waligni (void)
7915 {
7916 inst.instruction |= inst.operands[0].reg << 12;
7917 inst.instruction |= inst.operands[1].reg << 16;
7918 inst.instruction |= inst.operands[2].reg;
7919 inst.instruction |= inst.operands[3].imm << 20;
7920 }
7921
7922 static void
7923 do_iwmmxt_wmerge (void)
7924 {
7925 inst.instruction |= inst.operands[0].reg << 12;
7926 inst.instruction |= inst.operands[1].reg << 16;
7927 inst.instruction |= inst.operands[2].reg;
7928 inst.instruction |= inst.operands[3].imm << 21;
7929 }
7930
7931 static void
7932 do_iwmmxt_wmov (void)
7933 {
7934 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7935 inst.instruction |= inst.operands[0].reg << 12;
7936 inst.instruction |= inst.operands[1].reg << 16;
7937 inst.instruction |= inst.operands[1].reg;
7938 }
7939
7940 static void
7941 do_iwmmxt_wldstbh (void)
7942 {
7943 int reloc;
7944 inst.instruction |= inst.operands[0].reg << 12;
7945 if (thumb_mode)
7946 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7947 else
7948 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7949 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7950 }
7951
7952 static void
7953 do_iwmmxt_wldstw (void)
7954 {
7955 /* RIWR_RIWC clears .isreg for a control register. */
7956 if (!inst.operands[0].isreg)
7957 {
7958 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7959 inst.instruction |= 0xf0000000;
7960 }
7961
7962 inst.instruction |= inst.operands[0].reg << 12;
7963 encode_arm_cp_address (1, TRUE, TRUE, 0);
7964 }
7965
7966 static void
7967 do_iwmmxt_wldstd (void)
7968 {
7969 inst.instruction |= inst.operands[0].reg << 12;
7970 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7971 && inst.operands[1].immisreg)
7972 {
7973 inst.instruction &= ~0x1a000ff;
7974 inst.instruction |= (0xf << 28);
7975 if (inst.operands[1].preind)
7976 inst.instruction |= PRE_INDEX;
7977 if (!inst.operands[1].negative)
7978 inst.instruction |= INDEX_UP;
7979 if (inst.operands[1].writeback)
7980 inst.instruction |= WRITE_BACK;
7981 inst.instruction |= inst.operands[1].reg << 16;
7982 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7983 inst.instruction |= inst.operands[1].imm;
7984 }
7985 else
7986 encode_arm_cp_address (1, TRUE, FALSE, 0);
7987 }
7988
7989 static void
7990 do_iwmmxt_wshufh (void)
7991 {
7992 inst.instruction |= inst.operands[0].reg << 12;
7993 inst.instruction |= inst.operands[1].reg << 16;
7994 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7995 inst.instruction |= (inst.operands[2].imm & 0x0f);
7996 }
7997
7998 static void
7999 do_iwmmxt_wzero (void)
8000 {
8001 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8002 inst.instruction |= inst.operands[0].reg;
8003 inst.instruction |= inst.operands[0].reg << 12;
8004 inst.instruction |= inst.operands[0].reg << 16;
8005 }
8006
8007 static void
8008 do_iwmmxt_wrwrwr_or_imm5 (void)
8009 {
8010 if (inst.operands[2].isreg)
8011 do_rd_rn_rm ();
8012 else {
8013 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8014 _("immediate operand requires iWMMXt2"));
8015 do_rd_rn ();
8016 if (inst.operands[2].imm == 0)
8017 {
8018 switch ((inst.instruction >> 20) & 0xf)
8019 {
8020 case 4:
8021 case 5:
8022 case 6:
8023 case 7:
8024 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8025 inst.operands[2].imm = 16;
8026 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8027 break;
8028 case 8:
8029 case 9:
8030 case 10:
8031 case 11:
8032 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8033 inst.operands[2].imm = 32;
8034 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8035 break;
8036 case 12:
8037 case 13:
8038 case 14:
8039 case 15:
8040 {
8041 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8042 unsigned long wrn;
8043 wrn = (inst.instruction >> 16) & 0xf;
8044 inst.instruction &= 0xff0fff0f;
8045 inst.instruction |= wrn;
8046 /* Bail out here; the instruction is now assembled. */
8047 return;
8048 }
8049 }
8050 }
8051 /* Map 32 -> 0, etc. */
8052 inst.operands[2].imm &= 0x1f;
8053 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8054 }
8055 }
8056 \f
8057 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8058 operations first, then control, shift, and load/store. */
8059
8060 /* Insns like "foo X,Y,Z". */
8061
8062 static void
8063 do_mav_triple (void)
8064 {
8065 inst.instruction |= inst.operands[0].reg << 16;
8066 inst.instruction |= inst.operands[1].reg;
8067 inst.instruction |= inst.operands[2].reg << 12;
8068 }
8069
8070 /* Insns like "foo W,X,Y,Z".
8071 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8072
8073 static void
8074 do_mav_quad (void)
8075 {
8076 inst.instruction |= inst.operands[0].reg << 5;
8077 inst.instruction |= inst.operands[1].reg << 12;
8078 inst.instruction |= inst.operands[2].reg << 16;
8079 inst.instruction |= inst.operands[3].reg;
8080 }
8081
8082 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8083 static void
8084 do_mav_dspsc (void)
8085 {
8086 inst.instruction |= inst.operands[1].reg << 12;
8087 }
8088
8089 /* Maverick shift immediate instructions.
8090 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8091 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8092
8093 static void
8094 do_mav_shift (void)
8095 {
8096 int imm = inst.operands[2].imm;
8097
8098 inst.instruction |= inst.operands[0].reg << 12;
8099 inst.instruction |= inst.operands[1].reg << 16;
8100
8101 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8102 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8103 Bit 4 should be 0. */
8104 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8105
8106 inst.instruction |= imm;
8107 }
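
/* For illustration: a shift immediate of 0x45 becomes
   (0x45 & 0xf) | ((0x45 & 0x70) << 1) == 0x85, i.e. 5 in bits 0-3
   and 4 in bits 5-7, with bit 4 left clear. */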
8108 \f
8109 /* XScale instructions. Also sorted arithmetic before move. */
8110
8111 /* Xscale multiply-accumulate (argument parse)
8112 MIAcc acc0,Rm,Rs
8113 MIAPHcc acc0,Rm,Rs
8114 MIAxycc acc0,Rm,Rs. */
8115
8116 static void
8117 do_xsc_mia (void)
8118 {
8119 inst.instruction |= inst.operands[1].reg;
8120 inst.instruction |= inst.operands[2].reg << 12;
8121 }
8122
8123 /* Xscale move-accumulator-register (argument parse)
8124
8125 MARcc acc0,RdLo,RdHi. */
8126
8127 static void
8128 do_xsc_mar (void)
8129 {
8130 inst.instruction |= inst.operands[1].reg << 12;
8131 inst.instruction |= inst.operands[2].reg << 16;
8132 }
8133
8134 /* Xscale move-register-accumulator (argument parse)
8135
8136 MRAcc RdLo,RdHi,acc0. */
8137
8138 static void
8139 do_xsc_mra (void)
8140 {
8141 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8142 inst.instruction |= inst.operands[0].reg << 12;
8143 inst.instruction |= inst.operands[1].reg << 16;
8144 }
8145 \f
8146 /* Encoding functions relevant only to Thumb. */
8147
8148 /* inst.operands[i] is a shifted-register operand; encode
8149 it into inst.instruction in the format used by Thumb32. */
8150
8151 static void
8152 encode_thumb32_shifted_operand (int i)
8153 {
8154 unsigned int value = inst.reloc.exp.X_add_number;
8155 unsigned int shift = inst.operands[i].shift_kind;
8156
8157 constraint (inst.operands[i].immisreg,
8158 _("shift by register not allowed in thumb mode"));
8159 inst.instruction |= inst.operands[i].reg;
8160 if (shift == SHIFT_RRX)
8161 inst.instruction |= SHIFT_ROR << 4;
8162 else
8163 {
8164 constraint (inst.reloc.exp.X_op != O_constant,
8165 _("expression too complex"));
8166
8167 constraint (value > 32
8168 || (value == 32 && (shift == SHIFT_LSL
8169 || shift == SHIFT_ROR)),
8170 _("shift expression is too large"));
8171
8172 if (value == 0)
8173 shift = SHIFT_LSL;
8174 else if (value == 32)
8175 value = 0;
8176
8177 inst.instruction |= shift << 4;
8178 inst.instruction |= (value & 0x1c) << 10;
8179 inst.instruction |= (value & 0x03) << 6;
8180 }
8181 }
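
/* For illustration: a shifted-register operand such as "r1, lsl #3"
   puts the shift type in bits 5:4 and splits the amount across imm3
   (bits 14:12, here 0) and imm2 (bits 7:6, here 3); an amount of 32
   (lsr/asr only) is encoded as 0, and an amount of 0 always
   degenerates to LSL #0. */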
8182
8183
8184 /* inst.operands[i] was set up by parse_address. Encode it into a
8185 Thumb32 format load or store instruction. Reject forms that cannot
8186 be used with such instructions. If is_t is true, reject forms that
8187 cannot be used with a T instruction; if is_d is true, reject forms
8188 that cannot be used with a D instruction. */
8189
8190 static void
8191 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8192 {
8193 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8194
8195 constraint (!inst.operands[i].isreg,
8196 _("Instruction does not support =N addresses"));
8197
8198 inst.instruction |= inst.operands[i].reg << 16;
8199 if (inst.operands[i].immisreg)
8200 {
8201 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8202 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8203 constraint (inst.operands[i].negative,
8204 _("Thumb does not support negative register indexing"));
8205 constraint (inst.operands[i].postind,
8206 _("Thumb does not support register post-indexing"));
8207 constraint (inst.operands[i].writeback,
8208 _("Thumb does not support register indexing with writeback"));
8209 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8210 _("Thumb supports only LSL in shifted register indexing"));
8211
8212 inst.instruction |= inst.operands[i].imm;
8213 if (inst.operands[i].shifted)
8214 {
8215 constraint (inst.reloc.exp.X_op != O_constant,
8216 _("expression too complex"));
8217 constraint (inst.reloc.exp.X_add_number < 0
8218 || inst.reloc.exp.X_add_number > 3,
8219 _("shift out of range"));
8220 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8221 }
8222 inst.reloc.type = BFD_RELOC_UNUSED;
8223 }
8224 else if (inst.operands[i].preind)
8225 {
8226 constraint (is_pc && inst.operands[i].writeback,
8227 _("cannot use writeback with PC-relative addressing"));
8228 constraint (is_t && inst.operands[i].writeback,
8229 _("cannot use writeback with this instruction"));
8230
8231 if (is_d)
8232 {
8233 inst.instruction |= 0x01000000;
8234 if (inst.operands[i].writeback)
8235 inst.instruction |= 0x00200000;
8236 }
8237 else
8238 {
8239 inst.instruction |= 0x00000c00;
8240 if (inst.operands[i].writeback)
8241 inst.instruction |= 0x00000100;
8242 }
8243 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8244 }
8245 else if (inst.operands[i].postind)
8246 {
8247 assert (inst.operands[i].writeback);
8248 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8249 constraint (is_t, _("cannot use post-indexing with this instruction"));
8250
8251 if (is_d)
8252 inst.instruction |= 0x00200000;
8253 else
8254 inst.instruction |= 0x00000900;
8255 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8256 }
8257 else /* unindexed - only for coprocessor */
8258 inst.error = _("instruction does not accept unindexed addressing");
8259 }
8260
8261 /* Table of Thumb instructions which exist in both 16- and 32-bit
8262 encodings (the latter only in post-V6T2 cores). The index is the
8263 value used in the insns table below. When there is more than one
8264 possible 16-bit encoding for the instruction, this table always
8265 holds variant (1).
8266 Also contains several pseudo-instructions used during relaxation. */
8267 #define T16_32_TAB \
8268 X(adc, 4140, eb400000), \
8269 X(adcs, 4140, eb500000), \
8270 X(add, 1c00, eb000000), \
8271 X(adds, 1c00, eb100000), \
8272 X(addi, 0000, f1000000), \
8273 X(addis, 0000, f1100000), \
8274 X(add_pc,000f, f20f0000), \
8275 X(add_sp,000d, f10d0000), \
8276 X(adr, 000f, f20f0000), \
8277 X(and, 4000, ea000000), \
8278 X(ands, 4000, ea100000), \
8279 X(asr, 1000, fa40f000), \
8280 X(asrs, 1000, fa50f000), \
8281 X(b, e000, f000b000), \
8282 X(bcond, d000, f0008000), \
8283 X(bic, 4380, ea200000), \
8284 X(bics, 4380, ea300000), \
8285 X(cmn, 42c0, eb100f00), \
8286 X(cmp, 2800, ebb00f00), \
8287 X(cpsie, b660, f3af8400), \
8288 X(cpsid, b670, f3af8600), \
8289 X(cpy, 4600, ea4f0000), \
8290 X(dec_sp,80dd, f1ad0d00), \
8291 X(eor, 4040, ea800000), \
8292 X(eors, 4040, ea900000), \
8293 X(inc_sp,00dd, f10d0d00), \
8294 X(ldmia, c800, e8900000), \
8295 X(ldr, 6800, f8500000), \
8296 X(ldrb, 7800, f8100000), \
8297 X(ldrh, 8800, f8300000), \
8298 X(ldrsb, 5600, f9100000), \
8299 X(ldrsh, 5e00, f9300000), \
8300 X(ldr_pc,4800, f85f0000), \
8301 X(ldr_pc2,4800, f85f0000), \
8302 X(ldr_sp,9800, f85d0000), \
8303 X(lsl, 0000, fa00f000), \
8304 X(lsls, 0000, fa10f000), \
8305 X(lsr, 0800, fa20f000), \
8306 X(lsrs, 0800, fa30f000), \
8307 X(mov, 2000, ea4f0000), \
8308 X(movs, 2000, ea5f0000), \
8309 X(mul, 4340, fb00f000), \
8310 X(muls, 4340, ffffffff), /* no 32b muls */ \
8311 X(mvn, 43c0, ea6f0000), \
8312 X(mvns, 43c0, ea7f0000), \
8313 X(neg, 4240, f1c00000), /* rsb #0 */ \
8314 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8315 X(orr, 4300, ea400000), \
8316 X(orrs, 4300, ea500000), \
8317 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8318 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8319 X(rev, ba00, fa90f080), \
8320 X(rev16, ba40, fa90f090), \
8321 X(revsh, bac0, fa90f0b0), \
8322 X(ror, 41c0, fa60f000), \
8323 X(rors, 41c0, fa70f000), \
8324 X(sbc, 4180, eb600000), \
8325 X(sbcs, 4180, eb700000), \
8326 X(stmia, c000, e8800000), \
8327 X(str, 6000, f8400000), \
8328 X(strb, 7000, f8000000), \
8329 X(strh, 8000, f8200000), \
8330 X(str_sp,9000, f84d0000), \
8331 X(sub, 1e00, eba00000), \
8332 X(subs, 1e00, ebb00000), \
8333 X(subi, 8000, f1a00000), \
8334 X(subis, 8000, f1b00000), \
8335 X(sxtb, b240, fa4ff080), \
8336 X(sxth, b200, fa0ff080), \
8337 X(tst, 4200, ea100f00), \
8338 X(uxtb, b2c0, fa5ff080), \
8339 X(uxth, b280, fa1ff080), \
8340 X(nop, bf00, f3af8000), \
8341 X(yield, bf10, f3af8001), \
8342 X(wfe, bf20, f3af8002), \
8343 X(wfi, bf30, f3af8003), \
8344 X(sev, bf40, f3af8004),
8345
8346 /* To catch errors in encoding functions, the codes are all offset by
8347 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8348 as 16-bit instructions. */
8349 #define X(a,b,c) T_MNEM_##a
8350 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8351 #undef X
8352
8353 #define X(a,b,c) 0x##b
8354 static const unsigned short thumb_op16[] = { T16_32_TAB };
8355 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8356 #undef X
8357
8358 #define X(a,b,c) 0x##c
8359 static const unsigned int thumb_op32[] = { T16_32_TAB };
8360 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8361 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8362 #undef X
8363 #undef T16_32_TAB
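/* For example, THUMB_OP16 (T_MNEM_add) is 0x1c00, THUMB_OP32 (T_MNEM_add) is
   0xeb000000, and THUMB_SETS_FLAGS (T_MNEM_adds) is nonzero because the
   32-bit "adds" pattern has the S bit (0x00100000) set.  */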
8364
8365 /* Thumb instruction encoders, in alphabetical order. */
8366
8367 /* ADDW or SUBW. */
8368 static void
8369 do_t_add_sub_w (void)
8370 {
8371 int Rd, Rn;
8372
8373 Rd = inst.operands[0].reg;
8374 Rn = inst.operands[1].reg;
8375
8376 constraint (Rd == 15, _("PC not allowed as destination"));
8377 inst.instruction |= (Rn << 16) | (Rd << 8);
8378 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8379 }
8380
8381 /* Parse an add or subtract instruction. We get here with inst.instruction
8382 equalling any of T_MNEM_add, adds, sub, or subs. */
8383
8384 static void
8385 do_t_add_sub (void)
8386 {
8387 int Rd, Rs, Rn;
8388
8389 Rd = inst.operands[0].reg;
8390 Rs = (inst.operands[1].present
8391 ? inst.operands[1].reg /* Rd, Rs, foo */
8392 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8393
8394 if (unified_syntax)
8395 {
8396 bfd_boolean flags;
8397 bfd_boolean narrow;
8398 int opcode;
8399
8400 flags = (inst.instruction == T_MNEM_adds
8401 || inst.instruction == T_MNEM_subs);
8402 if (flags)
8403 narrow = (current_it_mask == 0);
8404 else
8405 narrow = (current_it_mask != 0);
8406 if (!inst.operands[2].isreg)
8407 {
8408 int add;
8409
8410 add = (inst.instruction == T_MNEM_add
8411 || inst.instruction == T_MNEM_adds);
8412 opcode = 0;
8413 if (inst.size_req != 4)
8414 {
8415 /* Attempt to use a narrow opcode, with relaxation if
8416 appropriate. */
8417 if (Rd == REG_SP && Rs == REG_SP && !flags)
8418 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8419 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8420 opcode = T_MNEM_add_sp;
8421 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8422 opcode = T_MNEM_add_pc;
8423 else if (Rd <= 7 && Rs <= 7 && narrow)
8424 {
8425 if (flags)
8426 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8427 else
8428 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8429 }
8430 if (opcode)
8431 {
8432 inst.instruction = THUMB_OP16(opcode);
8433 inst.instruction |= (Rd << 4) | Rs;
8434 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8435 if (inst.size_req != 2)
8436 inst.relax = opcode;
8437 }
8438 else
8439 constraint (inst.size_req == 2, BAD_HIREG);
8440 }
8441 if (inst.size_req == 4
8442 || (inst.size_req != 2 && !opcode))
8443 {
8444 if (Rd == REG_PC)
8445 {
8446 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8447 _("only SUBS PC, LR, #const allowed"));
8448 constraint (inst.reloc.exp.X_op != O_constant,
8449 _("expression too complex"));
8450 constraint (inst.reloc.exp.X_add_number < 0
8451 || inst.reloc.exp.X_add_number > 0xff,
8452 _("immediate value out of range"));
8453 inst.instruction = T2_SUBS_PC_LR
8454 | inst.reloc.exp.X_add_number;
8455 inst.reloc.type = BFD_RELOC_UNUSED;
8456 return;
8457 }
8458 else if (Rs == REG_PC)
8459 {
8460 /* Always use addw/subw. */
8461 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8462 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8463 }
8464 else
8465 {
8466 inst.instruction = THUMB_OP32 (inst.instruction);
8467 inst.instruction = (inst.instruction & 0xe1ffffff)
8468 | 0x10000000;
8469 if (flags)
8470 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8471 else
8472 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8473 }
8474 inst.instruction |= Rd << 8;
8475 inst.instruction |= Rs << 16;
8476 }
8477 }
8478 else
8479 {
8480 Rn = inst.operands[2].reg;
8481 /* See if we can do this with a 16-bit instruction. */
8482 if (!inst.operands[2].shifted && inst.size_req != 4)
8483 {
8484 if (Rd > 7 || Rs > 7 || Rn > 7)
8485 narrow = FALSE;
8486
8487 if (narrow)
8488 {
8489 inst.instruction = ((inst.instruction == T_MNEM_adds
8490 || inst.instruction == T_MNEM_add)
8491 ? T_OPCODE_ADD_R3
8492 : T_OPCODE_SUB_R3);
8493 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8494 return;
8495 }
8496
8497 if (inst.instruction == T_MNEM_add)
8498 {
8499 if (Rd == Rs)
8500 {
8501 inst.instruction = T_OPCODE_ADD_HI;
8502 inst.instruction |= (Rd & 8) << 4;
8503 inst.instruction |= (Rd & 7);
8504 inst.instruction |= Rn << 3;
8505 return;
8506 }
8507 /* ... because addition is commutative! */
8508 else if (Rd == Rn)
8509 {
8510 inst.instruction = T_OPCODE_ADD_HI;
8511 inst.instruction |= (Rd & 8) << 4;
8512 inst.instruction |= (Rd & 7);
8513 inst.instruction |= Rs << 3;
8514 return;
8515 }
8516 }
8517 }
8518 /* If we get here, it can't be done in 16 bits. */
8519 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8520 _("shift must be constant"));
8521 inst.instruction = THUMB_OP32 (inst.instruction);
8522 inst.instruction |= Rd << 8;
8523 inst.instruction |= Rs << 16;
8524 encode_thumb32_shifted_operand (2);
8525 }
8526 }
8527 else
8528 {
8529 constraint (inst.instruction == T_MNEM_adds
8530 || inst.instruction == T_MNEM_subs,
8531 BAD_THUMB32);
8532
8533 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8534 {
8535 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8536 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8537 BAD_HIREG);
8538
8539 inst.instruction = (inst.instruction == T_MNEM_add
8540 ? 0x0000 : 0x8000);
8541 inst.instruction |= (Rd << 4) | Rs;
8542 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8543 return;
8544 }
8545
8546 Rn = inst.operands[2].reg;
8547 constraint (inst.operands[2].shifted, _("unshifted register required"));
8548
8549 /* We now have Rd, Rs, and Rn set to registers. */
8550 if (Rd > 7 || Rs > 7 || Rn > 7)
8551 {
8552 /* Can't do this for SUB. */
8553 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8554 inst.instruction = T_OPCODE_ADD_HI;
8555 inst.instruction |= (Rd & 8) << 4;
8556 inst.instruction |= (Rd & 7);
8557 if (Rs == Rd)
8558 inst.instruction |= Rn << 3;
8559 else if (Rn == Rd)
8560 inst.instruction |= Rs << 3;
8561 else
8562 constraint (1, _("dest must overlap one source register"));
8563 }
8564 else
8565 {
8566 inst.instruction = (inst.instruction == T_MNEM_add
8567 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8568 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8569 }
8570 }
8571 }
8572
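/* ADR -- load a PC-relative address.  Prefer the relaxable 16-bit encoding
   when the destination is a low register and no size qualifier was given;
   otherwise emit the requested 32-bit or 16-bit form with a suitable
   PC-relative relocation.  */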
8573 static void
8574 do_t_adr (void)
8575 {
8576 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8577 {
8578 /* Defer to section relaxation. */
8579 inst.relax = inst.instruction;
8580 inst.instruction = THUMB_OP16 (inst.instruction);
8581 inst.instruction |= inst.operands[0].reg << 4;
8582 }
8583 else if (unified_syntax && inst.size_req != 2)
8584 {
8585 /* Generate a 32-bit opcode. */
8586 inst.instruction = THUMB_OP32 (inst.instruction);
8587 inst.instruction |= inst.operands[0].reg << 8;
8588 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8589 inst.reloc.pc_rel = 1;
8590 }
8591 else
8592 {
8593 /* Generate a 16-bit opcode. */
8594 inst.instruction = THUMB_OP16 (inst.instruction);
8595 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8596 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8597 inst.reloc.pc_rel = 1;
8598
8599 inst.instruction |= inst.operands[0].reg << 4;
8600 }
8601 }
8602
8603 /* Arithmetic instructions for which there is just one 16-bit
8604 instruction encoding, and it allows only two low registers.
8605 For maximal compatibility with ARM syntax, we allow three register
8606 operands even when Thumb-32 instructions are not available, as long
8607 as the first two are identical. For instance, both "sbc r0,r1" and
8608 "sbc r0,r0,r1" are allowed. */
8609 static void
8610 do_t_arit3 (void)
8611 {
8612 int Rd, Rs, Rn;
8613
8614 Rd = inst.operands[0].reg;
8615 Rs = (inst.operands[1].present
8616 ? inst.operands[1].reg /* Rd, Rs, foo */
8617 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8618 Rn = inst.operands[2].reg;
8619
8620 if (unified_syntax)
8621 {
8622 if (!inst.operands[2].isreg)
8623 {
8624 /* For an immediate, we always generate a 32-bit opcode;
8625 section relaxation will shrink it later if possible. */
8626 inst.instruction = THUMB_OP32 (inst.instruction);
8627 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8628 inst.instruction |= Rd << 8;
8629 inst.instruction |= Rs << 16;
8630 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8631 }
8632 else
8633 {
8634 bfd_boolean narrow;
8635
8636 /* See if we can do this with a 16-bit instruction. */
8637 if (THUMB_SETS_FLAGS (inst.instruction))
8638 narrow = current_it_mask == 0;
8639 else
8640 narrow = current_it_mask != 0;
8641
8642 if (Rd > 7 || Rn > 7 || Rs > 7)
8643 narrow = FALSE;
8644 if (inst.operands[2].shifted)
8645 narrow = FALSE;
8646 if (inst.size_req == 4)
8647 narrow = FALSE;
8648
8649 if (narrow
8650 && Rd == Rs)
8651 {
8652 inst.instruction = THUMB_OP16 (inst.instruction);
8653 inst.instruction |= Rd;
8654 inst.instruction |= Rn << 3;
8655 return;
8656 }
8657
8658 /* If we get here, it can't be done in 16 bits. */
8659 constraint (inst.operands[2].shifted
8660 && inst.operands[2].immisreg,
8661 _("shift must be constant"));
8662 inst.instruction = THUMB_OP32 (inst.instruction);
8663 inst.instruction |= Rd << 8;
8664 inst.instruction |= Rs << 16;
8665 encode_thumb32_shifted_operand (2);
8666 }
8667 }
8668 else
8669 {
8670 /* On its face this is a lie - the instruction does set the
8671 flags. However, the only supported mnemonic in this mode
8672 says it doesn't. */
8673 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8674
8675 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8676 _("unshifted register required"));
8677 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8678 constraint (Rd != Rs,
8679 _("dest and source1 must be the same register"));
8680
8681 inst.instruction = THUMB_OP16 (inst.instruction);
8682 inst.instruction |= Rd;
8683 inst.instruction |= Rn << 3;
8684 }
8685 }
8686
8687 /* Similarly, but for instructions where the arithmetic operation is
8688 commutative, so we can allow either of them to be different from
8689 the destination operand in a 16-bit instruction. For instance, all
8690 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8691 accepted. */
8692 static void
8693 do_t_arit3c (void)
8694 {
8695 int Rd, Rs, Rn;
8696
8697 Rd = inst.operands[0].reg;
8698 Rs = (inst.operands[1].present
8699 ? inst.operands[1].reg /* Rd, Rs, foo */
8700 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8701 Rn = inst.operands[2].reg;
8702
8703 if (unified_syntax)
8704 {
8705 if (!inst.operands[2].isreg)
8706 {
8707 /* For an immediate, we always generate a 32-bit opcode;
8708 section relaxation will shrink it later if possible. */
8709 inst.instruction = THUMB_OP32 (inst.instruction);
8710 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8711 inst.instruction |= Rd << 8;
8712 inst.instruction |= Rs << 16;
8713 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8714 }
8715 else
8716 {
8717 bfd_boolean narrow;
8718
8719 /* See if we can do this with a 16-bit instruction. */
8720 if (THUMB_SETS_FLAGS (inst.instruction))
8721 narrow = current_it_mask == 0;
8722 else
8723 narrow = current_it_mask != 0;
8724
8725 if (Rd > 7 || Rn > 7 || Rs > 7)
8726 narrow = FALSE;
8727 if (inst.operands[2].shifted)
8728 narrow = FALSE;
8729 if (inst.size_req == 4)
8730 narrow = FALSE;
8731
8732 if (narrow)
8733 {
8734 if (Rd == Rs)
8735 {
8736 inst.instruction = THUMB_OP16 (inst.instruction);
8737 inst.instruction |= Rd;
8738 inst.instruction |= Rn << 3;
8739 return;
8740 }
8741 if (Rd == Rn)
8742 {
8743 inst.instruction = THUMB_OP16 (inst.instruction);
8744 inst.instruction |= Rd;
8745 inst.instruction |= Rs << 3;
8746 return;
8747 }
8748 }
8749
8750 /* If we get here, it can't be done in 16 bits. */
8751 constraint (inst.operands[2].shifted
8752 && inst.operands[2].immisreg,
8753 _("shift must be constant"));
8754 inst.instruction = THUMB_OP32 (inst.instruction);
8755 inst.instruction |= Rd << 8;
8756 inst.instruction |= Rs << 16;
8757 encode_thumb32_shifted_operand (2);
8758 }
8759 }
8760 else
8761 {
8762 /* On its face this is a lie - the instruction does set the
8763 flags. However, the only supported mnemonic in this mode
8764 says it doesn't. */
8765 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8766
8767 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8768 _("unshifted register required"));
8769 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8770
8771 inst.instruction = THUMB_OP16 (inst.instruction);
8772 inst.instruction |= Rd;
8773
8774 if (Rd == Rs)
8775 inst.instruction |= Rn << 3;
8776 else if (Rd == Rn)
8777 inst.instruction |= Rs << 3;
8778 else
8779 constraint (1, _("dest must overlap one source register"));
8780 }
8781 }
8782
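/* Barrier instructions (DMB, DSB, ISB).  Only DSB accepts an arbitrary
   barrier option; the others must use SY (0xf), which is also the default
   when the operand is omitted.  */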
8783 static void
8784 do_t_barrier (void)
8785 {
8786 if (inst.operands[0].present)
8787 {
8788 constraint ((inst.instruction & 0xf0) != 0x40
8789 && inst.operands[0].imm != 0xf,
8790 "bad barrier type");
8791 inst.instruction |= inst.operands[0].imm;
8792 }
8793 else
8794 inst.instruction |= 0xf;
8795 }
8796
8797 static void
8798 do_t_bfc (void)
8799 {
8800 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8801 constraint (msb > 32, _("bit-field extends past end of register"));
8802 /* The instruction encoding stores the LSB and MSB,
8803 not the LSB and width. */
8804 inst.instruction |= inst.operands[0].reg << 8;
8805 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8806 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8807 inst.instruction |= msb - 1;
8808 }
8809
8810 static void
8811 do_t_bfi (void)
8812 {
8813 unsigned int msb;
8814
8815 /* #0 in second position is alternative syntax for bfc, which is
8816 the same instruction but with REG_PC in the Rm field. */
8817 if (!inst.operands[1].isreg)
8818 inst.operands[1].reg = REG_PC;
8819
8820 msb = inst.operands[2].imm + inst.operands[3].imm;
8821 constraint (msb > 32, _("bit-field extends past end of register"));
8822 /* The instruction encoding stores the LSB and MSB,
8823 not the LSB and width. */
8824 inst.instruction |= inst.operands[0].reg << 8;
8825 inst.instruction |= inst.operands[1].reg << 16;
8826 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8827 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8828 inst.instruction |= msb - 1;
8829 }
8830
8831 static void
8832 do_t_bfx (void)
8833 {
8834 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8835 _("bit-field extends past end of register"));
8836 inst.instruction |= inst.operands[0].reg << 8;
8837 inst.instruction |= inst.operands[1].reg << 16;
8838 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8839 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8840 inst.instruction |= inst.operands[3].imm - 1;
8841 }
8842
8843 /* ARM V5 Thumb BLX (argument parse)
8844 BLX <target_addr> which is BLX(1)
8845 BLX <Rm> which is BLX(2)
8846 Unfortunately, there are two different opcodes for this mnemonic.
8847 So, the insns[].value is not used, and the code here zaps values
8848 into inst.instruction.
8849
8850 ??? How to take advantage of the additional two bits of displacement
8851 available in Thumb32 mode? Need new relocation? */
8852
8853 static void
8854 do_t_blx (void)
8855 {
8856 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8857 if (inst.operands[0].isreg)
8858 /* We have a register, so this is BLX(2). */
8859 inst.instruction |= inst.operands[0].reg << 3;
8860 else
8861 {
8862 /* No register. This must be BLX(1). */
8863 inst.instruction = 0xf000e800;
8864 #ifdef OBJ_ELF
8865 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8866 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8867 else
8868 #endif
8869 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8870 inst.reloc.pc_rel = 1;
8871 }
8872 }
8873
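/* Thumb branch (B and conditional B).  Inside an IT block the branch takes
   its condition from the IT instruction and is encoded unconditionally;
   otherwise pick the 16- or 32-bit encoding and the matching PC-relative
   relocation, allowing relaxation of the 16-bit form.  */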
8874 static void
8875 do_t_branch (void)
8876 {
8877 int opcode;
8878 int cond;
8879
8880 if (current_it_mask)
8881 {
8882 /* Conditional branches inside IT blocks are encoded as unconditional
8883 branches. */
8884 cond = COND_ALWAYS;
8885 /* A branch must be the last instruction in an IT block. */
8886 constraint (current_it_mask != 0x10, BAD_BRANCH);
8887 }
8888 else
8889 cond = inst.cond;
8890
8891 if (cond != COND_ALWAYS)
8892 opcode = T_MNEM_bcond;
8893 else
8894 opcode = inst.instruction;
8895
8896 if (unified_syntax && inst.size_req == 4)
8897 {
8898 inst.instruction = THUMB_OP32(opcode);
8899 if (cond == COND_ALWAYS)
8900 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8901 else
8902 {
8903 assert (cond != 0xF);
8904 inst.instruction |= cond << 22;
8905 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8906 }
8907 }
8908 else
8909 {
8910 inst.instruction = THUMB_OP16(opcode);
8911 if (cond == COND_ALWAYS)
8912 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8913 else
8914 {
8915 inst.instruction |= cond << 8;
8916 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8917 }
8918 /* Allow section relaxation. */
8919 if (unified_syntax && inst.size_req != 2)
8920 inst.relax = opcode;
8921 }
8922
8923 inst.reloc.pc_rel = 1;
8924 }
8925
8926 static void
8927 do_t_bkpt (void)
8928 {
8929 constraint (inst.cond != COND_ALWAYS,
8930 _("instruction is always unconditional"));
8931 if (inst.operands[0].present)
8932 {
8933 constraint (inst.operands[0].imm > 255,
8934 _("immediate value out of range"));
8935 inst.instruction |= inst.operands[0].imm;
8936 }
8937 }
8938
8939 static void
8940 do_t_branch23 (void)
8941 {
8942 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8943 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8944 inst.reloc.pc_rel = 1;
8945
8946 /* If the destination of the branch is a defined symbol which does not have
8947 the THUMB_FUNC attribute, then we must be calling a function which has
8948 the (interfacearm) attribute. We look for the Thumb entry point to that
8949 function and change the branch to refer to that function instead. */
8950 if ( inst.reloc.exp.X_op == O_symbol
8951 && inst.reloc.exp.X_add_symbol != NULL
8952 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8953 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8954 inst.reloc.exp.X_add_symbol =
8955 find_real_start (inst.reloc.exp.X_add_symbol);
8956 }
8957
8958 static void
8959 do_t_bx (void)
8960 {
8961 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8962 inst.instruction |= inst.operands[0].reg << 3;
8963 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8964 should cause the alignment to be checked once it is known. This is
8965 because BX PC only works if the instruction is word aligned. */
8966 }
8967
8968 static void
8969 do_t_bxj (void)
8970 {
8971 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8972 if (inst.operands[0].reg == REG_PC)
8973 as_tsktsk (_("use of r15 in bxj is not really useful"));
8974
8975 inst.instruction |= inst.operands[0].reg << 16;
8976 }
8977
8978 static void
8979 do_t_clz (void)
8980 {
8981 inst.instruction |= inst.operands[0].reg << 8;
8982 inst.instruction |= inst.operands[1].reg << 16;
8983 inst.instruction |= inst.operands[1].reg;
8984 }
8985
8986 static void
8987 do_t_cps (void)
8988 {
8989 constraint (current_it_mask, BAD_NOT_IT);
8990 inst.instruction |= inst.operands[0].imm;
8991 }
8992
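/* CPSIE/CPSID.  The two-operand form (which also changes the processor mode)
   and the .w form need the 32-bit Thumb-2 encoding; the 16-bit encoding
   changes only the A/I/F interrupt flags.  */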
8993 static void
8994 do_t_cpsi (void)
8995 {
8996 constraint (current_it_mask, BAD_NOT_IT);
8997 if (unified_syntax
8998 && (inst.operands[1].present || inst.size_req == 4)
8999 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9000 {
9001 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9002 inst.instruction = 0xf3af8000;
9003 inst.instruction |= imod << 9;
9004 inst.instruction |= inst.operands[0].imm << 5;
9005 if (inst.operands[1].present)
9006 inst.instruction |= 0x100 | inst.operands[1].imm;
9007 }
9008 else
9009 {
9010 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9011 && (inst.operands[0].imm & 4),
9012 _("selected processor does not support 'A' form "
9013 "of this instruction"));
9014 constraint (inst.operands[1].present || inst.size_req == 4,
9015 _("Thumb does not support the 2-argument "
9016 "form of this instruction"));
9017 inst.instruction |= inst.operands[0].imm;
9018 }
9019 }
9020
9021 /* THUMB CPY instruction (argument parse). */
9022
9023 static void
9024 do_t_cpy (void)
9025 {
9026 if (inst.size_req == 4)
9027 {
9028 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9029 inst.instruction |= inst.operands[0].reg << 8;
9030 inst.instruction |= inst.operands[1].reg;
9031 }
9032 else
9033 {
9034 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9035 inst.instruction |= (inst.operands[0].reg & 0x7);
9036 inst.instruction |= inst.operands[1].reg << 3;
9037 }
9038 }
9039
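/* CBZ/CBNZ.  Not permitted inside an IT block; only low registers are
   accepted, and the target uses BFD_RELOC_THUMB_PCREL_BRANCH7.  */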
9040 static void
9041 do_t_cbz (void)
9042 {
9043 constraint (current_it_mask, BAD_NOT_IT);
9044 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9045 inst.instruction |= inst.operands[0].reg;
9046 inst.reloc.pc_rel = 1;
9047 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9048 }
9049
9050 static void
9051 do_t_dbg (void)
9052 {
9053 inst.instruction |= inst.operands[0].imm;
9054 }
9055
9056 static void
9057 do_t_div (void)
9058 {
9059 if (!inst.operands[1].present)
9060 inst.operands[1].reg = inst.operands[0].reg;
9061 inst.instruction |= inst.operands[0].reg << 8;
9062 inst.instruction |= inst.operands[1].reg << 16;
9063 inst.instruction |= inst.operands[2].reg;
9064 }
9065
9066 static void
9067 do_t_hint (void)
9068 {
9069 if (unified_syntax && inst.size_req == 4)
9070 inst.instruction = THUMB_OP32 (inst.instruction);
9071 else
9072 inst.instruction = THUMB_OP16 (inst.instruction);
9073 }
9074
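/* IT (If-Then).  The opcode table supplies the then/else mask as if the
   condition had its least significant bit set; when the condition is actually
   an even (negated) one, the specifier bits above the terminating 1 in the
   mask must be inverted.  */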
9075 static void
9076 do_t_it (void)
9077 {
9078 unsigned int cond = inst.operands[0].imm;
9079
9080 constraint (current_it_mask, BAD_NOT_IT);
9081 current_it_mask = (inst.instruction & 0xf) | 0x10;
9082 current_cc = cond;
9083
9084 /* If the condition is a negative condition, invert the mask. */
9085 if ((cond & 0x1) == 0x0)
9086 {
9087 unsigned int mask = inst.instruction & 0x000f;
9088
9089 if ((mask & 0x7) == 0)
9090 /* no conversion needed */;
9091 else if ((mask & 0x3) == 0)
9092 mask ^= 0x8;
9093 else if ((mask & 0x1) == 0)
9094 mask ^= 0xC;
9095 else
9096 mask ^= 0xE;
9097
9098 inst.instruction &= 0xfff0;
9099 inst.instruction |= mask;
9100 }
9101
9102 inst.instruction |= cond << 4;
9103 }
9104
9105 /* Helper function used for both push/pop and ldm/stm. */
9106 static void
9107 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9108 {
9109 bfd_boolean load;
9110
9111 load = (inst.instruction & (1 << 20)) != 0;
9112
9113 if (mask & (1 << 13))
9114 inst.error = _("SP not allowed in register list");
9115 if (load)
9116 {
9117 if (mask & (1 << 14)
9118 && mask & (1 << 15))
9119 inst.error = _("LR and PC should not both be in register list");
9120
9121 if ((mask & (1 << base)) != 0
9122 && writeback)
9123 as_warn (_("base register should not be in register list "
9124 "when written back"));
9125 }
9126 else
9127 {
9128 if (mask & (1 << 15))
9129 inst.error = _("PC not allowed in register list");
9130
9131 if (mask & (1 << base))
9132 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9133 }
9134
9135 if ((mask & (mask - 1)) == 0)
9136 {
9137 /* Single register transfers implemented as str/ldr. */
9138 if (writeback)
9139 {
9140 if (inst.instruction & (1 << 23))
9141 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9142 else
9143 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9144 }
9145 else
9146 {
9147 if (inst.instruction & (1 << 23))
9148 inst.instruction = 0x00800000; /* ia -> [base] */
9149 else
9150 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9151 }
9152
9153 inst.instruction |= 0xf8400000;
9154 if (load)
9155 inst.instruction |= 0x00100000;
9156
9157 mask = ffs (mask) - 1;
9158 mask <<= 12;
9159 }
9160 else if (writeback)
9161 inst.instruction |= WRITE_BACK;
9162
9163 inst.instruction |= mask;
9164 inst.instruction |= base << 16;
9165 }
9166
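/* LDM/STM.  Use a 16-bit LDMIA/STMIA (or PUSH/POP when the base is SP with
   writeback) if the base register, register list and writeback rules allow
   it; otherwise fall back to the 32-bit encoding via encode_thumb2_ldmstm.  */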
9167 static void
9168 do_t_ldmstm (void)
9169 {
9170 /* Allowing a complex expression for the register list really doesn't
9171 seem worth it. */
9171 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9172 _("expression too complex"));
9173 constraint (inst.operands[1].writeback,
9174 _("Thumb load/store multiple does not support {reglist}^"));
9175
9176 if (unified_syntax)
9177 {
9178 bfd_boolean narrow;
9179 unsigned mask;
9180
9181 narrow = FALSE;
9182 /* See if we can use a 16-bit instruction. */
9183 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9184 && inst.size_req != 4
9185 && !(inst.operands[1].imm & ~0xff))
9186 {
9187 mask = 1 << inst.operands[0].reg;
9188
9189 if (inst.operands[0].reg <= 7
9190 && (inst.instruction == T_MNEM_stmia
9191 ? inst.operands[0].writeback
9192 : (inst.operands[0].writeback
9193 == !(inst.operands[1].imm & mask))))
9194 {
9195 if (inst.instruction == T_MNEM_stmia
9196 && (inst.operands[1].imm & mask)
9197 && (inst.operands[1].imm & (mask - 1)))
9198 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9199 inst.operands[0].reg);
9200
9201 inst.instruction = THUMB_OP16 (inst.instruction);
9202 inst.instruction |= inst.operands[0].reg << 8;
9203 inst.instruction |= inst.operands[1].imm;
9204 narrow = TRUE;
9205 }
9206 else if (inst.operands[0].reg == REG_SP
9207 && inst.operands[0].writeback)
9208 {
9209 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9210 ? T_MNEM_push : T_MNEM_pop);
9211 inst.instruction |= inst.operands[1].imm;
9212 narrow = TRUE;
9213 }
9214 }
9215
9216 if (!narrow)
9217 {
9218 if (inst.instruction < 0xffff)
9219 inst.instruction = THUMB_OP32 (inst.instruction);
9220
9221 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9222 inst.operands[0].writeback);
9223 }
9224 }
9225 else
9226 {
9227 constraint (inst.operands[0].reg > 7
9228 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9229 constraint (inst.instruction != T_MNEM_ldmia
9230 && inst.instruction != T_MNEM_stmia,
9231 _("Thumb-2 instruction only valid in unified syntax"));
9232 if (inst.instruction == T_MNEM_stmia)
9233 {
9234 if (!inst.operands[0].writeback)
9235 as_warn (_("this instruction will write back the base register"));
9236 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9237 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9238 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9239 inst.operands[0].reg);
9240 }
9241 else
9242 {
9243 if (!inst.operands[0].writeback
9244 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9245 as_warn (_("this instruction will write back the base register"));
9246 else if (inst.operands[0].writeback
9247 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9248 as_warn (_("this instruction will not write back the base register"));
9249 }
9250
9251 inst.instruction = THUMB_OP16 (inst.instruction);
9252 inst.instruction |= inst.operands[0].reg << 8;
9253 inst.instruction |= inst.operands[1].imm;
9254 }
9255 }
9256
9257 static void
9258 do_t_ldrex (void)
9259 {
9260 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9261 || inst.operands[1].postind || inst.operands[1].writeback
9262 || inst.operands[1].immisreg || inst.operands[1].shifted
9263 || inst.operands[1].negative,
9264 BAD_ADDR_MODE);
9265
9266 inst.instruction |= inst.operands[0].reg << 12;
9267 inst.instruction |= inst.operands[1].reg << 16;
9268 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9269 }
9270
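/* LDREXD.  If the second destination register is omitted it defaults to the
   register after the first; the two must be distinct, and r14 may not be the
   first register in the omitted form.  */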
9271 static void
9272 do_t_ldrexd (void)
9273 {
9274 if (!inst.operands[1].present)
9275 {
9276 constraint (inst.operands[0].reg == REG_LR,
9277 _("r14 not allowed as first register "
9278 "when second register is omitted"));
9279 inst.operands[1].reg = inst.operands[0].reg + 1;
9280 }
9281 constraint (inst.operands[0].reg == inst.operands[1].reg,
9282 BAD_OVERLAP);
9283
9284 inst.instruction |= inst.operands[0].reg << 12;
9285 inst.instruction |= inst.operands[1].reg << 8;
9286 inst.instruction |= inst.operands[2].reg << 16;
9287 }
9288
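/* Single register load/store (LDR, STR and their byte/halfword/signed
   variants).  Try the 16-bit register-offset, immediate-offset, SP-relative
   and PC-relative forms first, relaxing to 32 bits where permitted; otherwise
   emit the Thumb-2 encoding via encode_thumb32_addr_mode.  */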
9289 static void
9290 do_t_ldst (void)
9291 {
9292 unsigned long opcode;
9293 int Rn;
9294
9295 opcode = inst.instruction;
9296 if (unified_syntax)
9297 {
9298 if (!inst.operands[1].isreg)
9299 {
9300 if (opcode <= 0xffff)
9301 inst.instruction = THUMB_OP32 (opcode);
9302 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9303 return;
9304 }
9305 if (inst.operands[1].isreg
9306 && !inst.operands[1].writeback
9307 && !inst.operands[1].shifted && !inst.operands[1].postind
9308 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9309 && opcode <= 0xffff
9310 && inst.size_req != 4)
9311 {
9312 /* Insn may have a 16-bit form. */
9313 Rn = inst.operands[1].reg;
9314 if (inst.operands[1].immisreg)
9315 {
9316 inst.instruction = THUMB_OP16 (opcode);
9317 /* [Rn, Ri] */
9318 if (Rn <= 7 && inst.operands[1].imm <= 7)
9319 goto op16;
9320 }
9321 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9322 && opcode != T_MNEM_ldrsb)
9323 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9324 || (Rn == REG_SP && opcode == T_MNEM_str))
9325 {
9326 /* [Rn, #const] */
9327 if (Rn > 7)
9328 {
9329 if (Rn == REG_PC)
9330 {
9331 if (inst.reloc.pc_rel)
9332 opcode = T_MNEM_ldr_pc2;
9333 else
9334 opcode = T_MNEM_ldr_pc;
9335 }
9336 else
9337 {
9338 if (opcode == T_MNEM_ldr)
9339 opcode = T_MNEM_ldr_sp;
9340 else
9341 opcode = T_MNEM_str_sp;
9342 }
9343 inst.instruction = inst.operands[0].reg << 8;
9344 }
9345 else
9346 {
9347 inst.instruction = inst.operands[0].reg;
9348 inst.instruction |= inst.operands[1].reg << 3;
9349 }
9350 inst.instruction |= THUMB_OP16 (opcode);
9351 if (inst.size_req == 2)
9352 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9353 else
9354 inst.relax = opcode;
9355 return;
9356 }
9357 }
9358 /* Definitely a 32-bit variant. */
9359 inst.instruction = THUMB_OP32 (opcode);
9360 inst.instruction |= inst.operands[0].reg << 12;
9361 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9362 return;
9363 }
9364
9365 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9366
9367 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9368 {
9369 /* Only [Rn,Rm] is acceptable. */
9370 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9371 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9372 || inst.operands[1].postind || inst.operands[1].shifted
9373 || inst.operands[1].negative,
9374 _("Thumb does not support this addressing mode"));
9375 inst.instruction = THUMB_OP16 (inst.instruction);
9376 goto op16;
9377 }
9378
9379 inst.instruction = THUMB_OP16 (inst.instruction);
9380 if (!inst.operands[1].isreg)
9381 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9382 return;
9383
9384 constraint (!inst.operands[1].preind
9385 || inst.operands[1].shifted
9386 || inst.operands[1].writeback,
9387 _("Thumb does not support this addressing mode"));
9388 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9389 {
9390 constraint (inst.instruction & 0x0600,
9391 _("byte or halfword not valid for base register"));
9392 constraint (inst.operands[1].reg == REG_PC
9393 && !(inst.instruction & THUMB_LOAD_BIT),
9394 _("r15 based store not allowed"));
9395 constraint (inst.operands[1].immisreg,
9396 _("invalid base register for register offset"));
9397
9398 if (inst.operands[1].reg == REG_PC)
9399 inst.instruction = T_OPCODE_LDR_PC;
9400 else if (inst.instruction & THUMB_LOAD_BIT)
9401 inst.instruction = T_OPCODE_LDR_SP;
9402 else
9403 inst.instruction = T_OPCODE_STR_SP;
9404
9405 inst.instruction |= inst.operands[0].reg << 8;
9406 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9407 return;
9408 }
9409
9410 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9411 if (!inst.operands[1].immisreg)
9412 {
9413 /* Immediate offset. */
9414 inst.instruction |= inst.operands[0].reg;
9415 inst.instruction |= inst.operands[1].reg << 3;
9416 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9417 return;
9418 }
9419
9420 /* Register offset. */
9421 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9422 constraint (inst.operands[1].negative,
9423 _("Thumb does not support this addressing mode"));
9424
9425 op16:
9426 switch (inst.instruction)
9427 {
9428 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9429 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9430 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9431 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9432 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9433 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9434 case 0x5600 /* ldrsb */:
9435 case 0x5e00 /* ldrsh */: break;
9436 default: abort ();
9437 }
9438
9439 inst.instruction |= inst.operands[0].reg;
9440 inst.instruction |= inst.operands[1].reg << 3;
9441 inst.instruction |= inst.operands[1].imm << 6;
9442 }
9443
9444 static void
9445 do_t_ldstd (void)
9446 {
9447 if (!inst.operands[1].present)
9448 {
9449 inst.operands[1].reg = inst.operands[0].reg + 1;
9450 constraint (inst.operands[0].reg == REG_LR,
9451 _("r14 not allowed here"));
9452 }
9453 inst.instruction |= inst.operands[0].reg << 12;
9454 inst.instruction |= inst.operands[1].reg << 8;
9455 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9456
9457 }
9458
9459 static void
9460 do_t_ldstt (void)
9461 {
9462 inst.instruction |= inst.operands[0].reg << 12;
9463 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9464 }
9465
9466 static void
9467 do_t_mla (void)
9468 {
9469 inst.instruction |= inst.operands[0].reg << 8;
9470 inst.instruction |= inst.operands[1].reg << 16;
9471 inst.instruction |= inst.operands[2].reg;
9472 inst.instruction |= inst.operands[3].reg << 12;
9473 }
9474
9475 static void
9476 do_t_mlal (void)
9477 {
9478 inst.instruction |= inst.operands[0].reg << 12;
9479 inst.instruction |= inst.operands[1].reg << 8;
9480 inst.instruction |= inst.operands[2].reg << 16;
9481 inst.instruction |= inst.operands[3].reg;
9482 }
9483
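/* MOV, MOVS and CMP.  In unified syntax, choose the narrowest encoding
   consistent with the operands and the required flag-setting behaviour;
   MOVS PC, LR is converted to SUBS PC, LR, #0.  In pre-UAL syntax only the
   traditional 16-bit forms are produced.  */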
9484 static void
9485 do_t_mov_cmp (void)
9486 {
9487 if (unified_syntax)
9488 {
9489 int r0off = (inst.instruction == T_MNEM_mov
9490 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9491 unsigned long opcode;
9492 bfd_boolean narrow;
9493 bfd_boolean low_regs;
9494
9495 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9496 opcode = inst.instruction;
9497 if (current_it_mask)
9498 narrow = opcode != T_MNEM_movs;
9499 else
9500 narrow = opcode != T_MNEM_movs || low_regs;
9501 if (inst.size_req == 4
9502 || inst.operands[1].shifted)
9503 narrow = FALSE;
9504
9505 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9506 if (opcode == T_MNEM_movs && inst.operands[1].isreg
9507 && !inst.operands[1].shifted
9508 && inst.operands[0].reg == REG_PC
9509 && inst.operands[1].reg == REG_LR)
9510 {
9511 inst.instruction = T2_SUBS_PC_LR;
9512 return;
9513 }
9514
9515 if (!inst.operands[1].isreg)
9516 {
9517 /* Immediate operand. */
9518 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9519 narrow = 0;
9520 if (low_regs && narrow)
9521 {
9522 inst.instruction = THUMB_OP16 (opcode);
9523 inst.instruction |= inst.operands[0].reg << 8;
9524 if (inst.size_req == 2)
9525 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9526 else
9527 inst.relax = opcode;
9528 }
9529 else
9530 {
9531 inst.instruction = THUMB_OP32 (inst.instruction);
9532 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9533 inst.instruction |= inst.operands[0].reg << r0off;
9534 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9535 }
9536 }
9537 else if (!narrow)
9538 {
9539 inst.instruction = THUMB_OP32 (inst.instruction);
9540 inst.instruction |= inst.operands[0].reg << r0off;
9541 encode_thumb32_shifted_operand (1);
9542 }
9543 else
9544 switch (inst.instruction)
9545 {
9546 case T_MNEM_mov:
9547 inst.instruction = T_OPCODE_MOV_HR;
9548 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9549 inst.instruction |= (inst.operands[0].reg & 0x7);
9550 inst.instruction |= inst.operands[1].reg << 3;
9551 break;
9552
9553 case T_MNEM_movs:
9554 /* We know we have low registers at this point.
9555 Generate ADD Rd, Rs, #0. */
9556 inst.instruction = T_OPCODE_ADD_I3;
9557 inst.instruction |= inst.operands[0].reg;
9558 inst.instruction |= inst.operands[1].reg << 3;
9559 break;
9560
9561 case T_MNEM_cmp:
9562 if (low_regs)
9563 {
9564 inst.instruction = T_OPCODE_CMP_LR;
9565 inst.instruction |= inst.operands[0].reg;
9566 inst.instruction |= inst.operands[1].reg << 3;
9567 }
9568 else
9569 {
9570 inst.instruction = T_OPCODE_CMP_HR;
9571 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9572 inst.instruction |= (inst.operands[0].reg & 0x7);
9573 inst.instruction |= inst.operands[1].reg << 3;
9574 }
9575 break;
9576 }
9577 return;
9578 }
9579
9580 inst.instruction = THUMB_OP16 (inst.instruction);
9581 if (inst.operands[1].isreg)
9582 {
9583 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9584 {
9585 /* A move of two low registers is encoded as ADD Rd, Rs, #0
9586 since a MOV instruction produces unpredictable results. */
9587 if (inst.instruction == T_OPCODE_MOV_I8)
9588 inst.instruction = T_OPCODE_ADD_I3;
9589 else
9590 inst.instruction = T_OPCODE_CMP_LR;
9591
9592 inst.instruction |= inst.operands[0].reg;
9593 inst.instruction |= inst.operands[1].reg << 3;
9594 }
9595 else
9596 {
9597 if (inst.instruction == T_OPCODE_MOV_I8)
9598 inst.instruction = T_OPCODE_MOV_HR;
9599 else
9600 inst.instruction = T_OPCODE_CMP_HR;
9601 do_t_cpy ();
9602 }
9603 }
9604 else
9605 {
9606 constraint (inst.operands[0].reg > 7,
9607 _("only lo regs allowed with immediate"));
9608 inst.instruction |= inst.operands[0].reg << 8;
9609 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9610 }
9611 }
9612
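/* MOVW/MOVT.  The 16-bit immediate is scattered across the imm4, i, imm3 and
   imm8 fields of the 32-bit encoding; :lower16: and :upper16: expressions
   become the corresponding Thumb MOVW/MOVT relocations.  */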
9613 static void
9614 do_t_mov16 (void)
9615 {
9616 bfd_vma imm;
9617 bfd_boolean top;
9618
9619 top = (inst.instruction & 0x00800000) != 0;
9620 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9621 {
9622 constraint (top, _(":lower16: not allowed in this instruction"));
9623 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9624 }
9625 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9626 {
9627 constraint (!top, _(":upper16: not allowed in this instruction"));
9628 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9629 }
9630
9631 inst.instruction |= inst.operands[0].reg << 8;
9632 if (inst.reloc.type == BFD_RELOC_UNUSED)
9633 {
9634 imm = inst.reloc.exp.X_add_number;
9635 inst.instruction |= (imm & 0xf000) << 4;
9636 inst.instruction |= (imm & 0x0800) << 15;
9637 inst.instruction |= (imm & 0x0700) << 4;
9638 inst.instruction |= (imm & 0x00ff);
9639 }
9640 }
9641
9642 static void
9643 do_t_mvn_tst (void)
9644 {
9645 if (unified_syntax)
9646 {
9647 int r0off = (inst.instruction == T_MNEM_mvn
9648 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9649 bfd_boolean narrow;
9650
9651 if (inst.size_req == 4
9652 || inst.instruction > 0xffff
9653 || inst.operands[1].shifted
9654 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9655 narrow = FALSE;
9656 else if (inst.instruction == T_MNEM_cmn)
9657 narrow = TRUE;
9658 else if (THUMB_SETS_FLAGS (inst.instruction))
9659 narrow = (current_it_mask == 0);
9660 else
9661 narrow = (current_it_mask != 0);
9662
9663 if (!inst.operands[1].isreg)
9664 {
9665 /* For an immediate, we always generate a 32-bit opcode;
9666 section relaxation will shrink it later if possible. */
9667 if (inst.instruction < 0xffff)
9668 inst.instruction = THUMB_OP32 (inst.instruction);
9669 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9670 inst.instruction |= inst.operands[0].reg << r0off;
9671 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9672 }
9673 else
9674 {
9675 /* See if we can do this with a 16-bit instruction. */
9676 if (narrow)
9677 {
9678 inst.instruction = THUMB_OP16 (inst.instruction);
9679 inst.instruction |= inst.operands[0].reg;
9680 inst.instruction |= inst.operands[1].reg << 3;
9681 }
9682 else
9683 {
9684 constraint (inst.operands[1].shifted
9685 && inst.operands[1].immisreg,
9686 _("shift must be constant"));
9687 if (inst.instruction < 0xffff)
9688 inst.instruction = THUMB_OP32 (inst.instruction);
9689 inst.instruction |= inst.operands[0].reg << r0off;
9690 encode_thumb32_shifted_operand (1);
9691 }
9692 }
9693 }
9694 else
9695 {
9696 constraint (inst.instruction > 0xffff
9697 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9698 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9699 _("unshifted register required"));
9700 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9701 BAD_HIREG);
9702
9703 inst.instruction = THUMB_OP16 (inst.instruction);
9704 inst.instruction |= inst.operands[0].reg;
9705 inst.instruction |= inst.operands[1].reg << 3;
9706 }
9707 }
9708
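/* MRS.  An operand with no PSR flag bits selects the v7M special purpose
   register form; otherwise only CPSR or SPSR (optionally suffixed _all) is
   accepted.  */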
9709 static void
9710 do_t_mrs (void)
9711 {
9712 int flags;
9713
9714 if (do_vfp_nsyn_mrs () == SUCCESS)
9715 return;
9716
9717 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9718 if (flags == 0)
9719 {
9720 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9721 _("selected processor does not support "
9722 "requested special purpose register"));
9723 }
9724 else
9725 {
9726 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9727 _("selected processor does not support "
9728 "requested special purpose register %x"));
9729 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9730 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9731 _("'CPSR' or 'SPSR' expected"));
9732 }
9733
9734 inst.instruction |= inst.operands[0].reg << 8;
9735 inst.instruction |= (flags & SPSR_BIT) >> 2;
9736 inst.instruction |= inst.operands[1].imm & 0xff;
9737 }
9738
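/* MSR.  Only a register source is supported in Thumb.  A PSR field mask
   selects the classic CPSR/SPSR form; a bare value in the low byte names a
   v7M special purpose register.  */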
9739 static void
9740 do_t_msr (void)
9741 {
9742 int flags;
9743
9744 if (do_vfp_nsyn_msr () == SUCCESS)
9745 return;
9746
9747 constraint (!inst.operands[1].isreg,
9748 _("Thumb encoding does not support an immediate here"));
9749 flags = inst.operands[0].imm;
9750 if (flags & ~0xff)
9751 {
9752 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9753 _("selected processor does not support "
9754 "requested special purpose register"));
9755 }
9756 else
9757 {
9758 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9759 _("selected processor does not support "
9760 "requested special purpose register"));
9761 flags |= PSR_f;
9762 }
9763 inst.instruction |= (flags & SPSR_BIT) >> 2;
9764 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9765 inst.instruction |= (flags & 0xff);
9766 inst.instruction |= inst.operands[1].reg << 16;
9767 }
9768
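/* MUL/MULS.  The 16-bit encoding needs low registers and a destination that
   overlaps one of the sources; only unified syntax can use the 32-bit
   non-flag-setting MUL.  */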
9769 static void
9770 do_t_mul (void)
9771 {
9772 if (!inst.operands[2].present)
9773 inst.operands[2].reg = inst.operands[0].reg;
9774
9775 /* There is no 32-bit MULS and no 16-bit MUL. */
9776 if (unified_syntax && inst.instruction == T_MNEM_mul)
9777 {
9778 inst.instruction = THUMB_OP32 (inst.instruction);
9779 inst.instruction |= inst.operands[0].reg << 8;
9780 inst.instruction |= inst.operands[1].reg << 16;
9781 inst.instruction |= inst.operands[2].reg << 0;
9782 }
9783 else
9784 {
9785 constraint (!unified_syntax
9786 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9787 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9788 BAD_HIREG);
9789
9790 inst.instruction = THUMB_OP16 (inst.instruction);
9791 inst.instruction |= inst.operands[0].reg;
9792
9793 if (inst.operands[0].reg == inst.operands[1].reg)
9794 inst.instruction |= inst.operands[2].reg << 3;
9795 else if (inst.operands[0].reg == inst.operands[2].reg)
9796 inst.instruction |= inst.operands[1].reg << 3;
9797 else
9798 constraint (1, _("dest must overlap one source register"));
9799 }
9800 }
9801
9802 static void
9803 do_t_mull (void)
9804 {
9805 inst.instruction |= inst.operands[0].reg << 12;
9806 inst.instruction |= inst.operands[1].reg << 8;
9807 inst.instruction |= inst.operands[2].reg << 16;
9808 inst.instruction |= inst.operands[3].reg;
9809
9810 if (inst.operands[0].reg == inst.operands[1].reg)
9811 as_tsktsk (_("rdhi and rdlo must be different"));
9812 }
9813
9814 static void
9815 do_t_nop (void)
9816 {
9817 if (unified_syntax)
9818 {
9819 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9820 {
9821 inst.instruction = THUMB_OP32 (inst.instruction);
9822 inst.instruction |= inst.operands[0].imm;
9823 }
9824 else
9825 {
9826 inst.instruction = THUMB_OP16 (inst.instruction);
9827 inst.instruction |= inst.operands[0].imm << 4;
9828 }
9829 }
9830 else
9831 {
9832 constraint (inst.operands[0].present,
9833 _("Thumb does not support NOP with hints"));
9834 inst.instruction = 0x46c0;
9835 }
9836 }
9837
9838 static void
9839 do_t_neg (void)
9840 {
9841 if (unified_syntax)
9842 {
9843 bfd_boolean narrow;
9844
9845 if (THUMB_SETS_FLAGS (inst.instruction))
9846 narrow = (current_it_mask == 0);
9847 else
9848 narrow = (current_it_mask != 0);
9849 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9850 narrow = FALSE;
9851 if (inst.size_req == 4)
9852 narrow = FALSE;
9853
9854 if (!narrow)
9855 {
9856 inst.instruction = THUMB_OP32 (inst.instruction);
9857 inst.instruction |= inst.operands[0].reg << 8;
9858 inst.instruction |= inst.operands[1].reg << 16;
9859 }
9860 else
9861 {
9862 inst.instruction = THUMB_OP16 (inst.instruction);
9863 inst.instruction |= inst.operands[0].reg;
9864 inst.instruction |= inst.operands[1].reg << 3;
9865 }
9866 }
9867 else
9868 {
9869 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9870 BAD_HIREG);
9871 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9872
9873 inst.instruction = THUMB_OP16 (inst.instruction);
9874 inst.instruction |= inst.operands[0].reg;
9875 inst.instruction |= inst.operands[1].reg << 3;
9876 }
9877 }
9878
9879 static void
9880 do_t_pkhbt (void)
9881 {
9882 inst.instruction |= inst.operands[0].reg << 8;
9883 inst.instruction |= inst.operands[1].reg << 16;
9884 inst.instruction |= inst.operands[2].reg;
9885 if (inst.operands[3].present)
9886 {
9887 unsigned int val = inst.reloc.exp.X_add_number;
9888 constraint (inst.reloc.exp.X_op != O_constant,
9889 _("expression too complex"));
9890 inst.instruction |= (val & 0x1c) << 10;
9891 inst.instruction |= (val & 0x03) << 6;
9892 }
9893 }
9894
9895 static void
9896 do_t_pkhtb (void)
9897 {
9898 if (!inst.operands[3].present)
9899 inst.instruction &= ~0x00000020;
9900 do_t_pkhbt ();
9901 }
9902
9903 static void
9904 do_t_pld (void)
9905 {
9906 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9907 }
9908
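/* PUSH/POP.  Use the 16-bit encoding when the register list contains only
   low registers, optionally plus LR for PUSH or PC for POP; any other list
   needs the 32-bit LDM/STM form, which is only available in unified
   syntax.  */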
9909 static void
9910 do_t_push_pop (void)
9911 {
9912 unsigned mask;
9913
9914 constraint (inst.operands[0].writeback,
9915 _("push/pop do not support {reglist}^"));
9916 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9917 _("expression too complex"));
9918
9919 mask = inst.operands[0].imm;
9920 if ((mask & ~0xff) == 0)
9921 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
9922 else if ((inst.instruction == T_MNEM_push
9923 && (mask & ~0xff) == 1 << REG_LR)
9924 || (inst.instruction == T_MNEM_pop
9925 && (mask & ~0xff) == 1 << REG_PC))
9926 {
9927 inst.instruction = THUMB_OP16 (inst.instruction);
9928 inst.instruction |= THUMB_PP_PC_LR;
9929 inst.instruction |= mask & 0xff;
9930 }
9931 else if (unified_syntax)
9932 {
9933 inst.instruction = THUMB_OP32 (inst.instruction);
9934 encode_thumb2_ldmstm (13, mask, TRUE);
9935 }
9936 else
9937 {
9938 inst.error = _("invalid register list to push/pop instruction");
9939 return;
9940 }
9941 }
9942
9943 static void
9944 do_t_rbit (void)
9945 {
9946 inst.instruction |= inst.operands[0].reg << 8;
9947 inst.instruction |= inst.operands[1].reg << 16;
9948 }
9949
9950 static void
9951 do_t_rev (void)
9952 {
9953 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9954 && inst.size_req != 4)
9955 {
9956 inst.instruction = THUMB_OP16 (inst.instruction);
9957 inst.instruction |= inst.operands[0].reg;
9958 inst.instruction |= inst.operands[1].reg << 3;
9959 }
9960 else if (unified_syntax)
9961 {
9962 inst.instruction = THUMB_OP32 (inst.instruction);
9963 inst.instruction |= inst.operands[0].reg << 8;
9964 inst.instruction |= inst.operands[1].reg << 16;
9965 inst.instruction |= inst.operands[1].reg;
9966 }
9967 else
9968 inst.error = BAD_HIREG;
9969 }
9970
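/* RSB/RSBS.  Only the low-register, immediate-zero case can be narrowed (to
   the 16-bit NEG encoding); everything else uses the 32-bit form with a
   modified immediate or shifted register operand.  */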
9971 static void
9972 do_t_rsb (void)
9973 {
9974 int Rd, Rs;
9975
9976 Rd = inst.operands[0].reg;
9977 Rs = (inst.operands[1].present
9978 ? inst.operands[1].reg /* Rd, Rs, foo */
9979 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9980
9981 inst.instruction |= Rd << 8;
9982 inst.instruction |= Rs << 16;
9983 if (!inst.operands[2].isreg)
9984 {
9985 bfd_boolean narrow;
9986
9987 if ((inst.instruction & 0x00100000) != 0)
9988 narrow = (current_it_mask == 0);
9989 else
9990 narrow = (current_it_mask != 0);
9991
9992 if (Rd > 7 || Rs > 7)
9993 narrow = FALSE;
9994
9995 if (inst.size_req == 4 || !unified_syntax)
9996 narrow = FALSE;
9997
9998 if (inst.reloc.exp.X_op != O_constant
9999 || inst.reloc.exp.X_add_number != 0)
10000 narrow = FALSE;
10001
10002 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10003 relaxation, but it doesn't seem worth the hassle. */
10004 if (narrow)
10005 {
10006 inst.reloc.type = BFD_RELOC_UNUSED;
10007 inst.instruction = THUMB_OP16 (T_MNEM_negs);
10008 inst.instruction |= Rs << 3;
10009 inst.instruction |= Rd;
10010 }
10011 else
10012 {
10013 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10014 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10015 }
10016 }
10017 else
10018 encode_thumb32_shifted_operand (2);
10019 }
10020
10021 static void
10022 do_t_setend (void)
10023 {
10024 constraint (current_it_mask, BAD_NOT_IT);
10025 if (inst.operands[0].imm)
10026 inst.instruction |= 0x8;
10027 }
10028
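/* Immediate and register shifts (ASR, LSL, LSR, ROR).  Narrow encodings exist
   only for low registers, and ROR has no 16-bit immediate form; wide
   immediate shifts are encoded as MOV{S} with a shifted-register operand.  */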
10029 static void
10030 do_t_shift (void)
10031 {
10032 if (!inst.operands[1].present)
10033 inst.operands[1].reg = inst.operands[0].reg;
10034
10035 if (unified_syntax)
10036 {
10037 bfd_boolean narrow;
10038 int shift_kind;
10039
10040 switch (inst.instruction)
10041 {
10042 case T_MNEM_asr:
10043 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10044 case T_MNEM_lsl:
10045 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10046 case T_MNEM_lsr:
10047 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10048 case T_MNEM_ror:
10049 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10050 default: abort ();
10051 }
10052
10053 if (THUMB_SETS_FLAGS (inst.instruction))
10054 narrow = (current_it_mask == 0);
10055 else
10056 narrow = (current_it_mask != 0);
10057 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10058 narrow = FALSE;
10059 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10060 narrow = FALSE;
10061 if (inst.operands[2].isreg
10062 && (inst.operands[1].reg != inst.operands[0].reg
10063 || inst.operands[2].reg > 7))
10064 narrow = FALSE;
10065 if (inst.size_req == 4)
10066 narrow = FALSE;
10067
10068 if (!narrow)
10069 {
10070 if (inst.operands[2].isreg)
10071 {
10072 inst.instruction = THUMB_OP32 (inst.instruction);
10073 inst.instruction |= inst.operands[0].reg << 8;
10074 inst.instruction |= inst.operands[1].reg << 16;
10075 inst.instruction |= inst.operands[2].reg;
10076 }
10077 else
10078 {
10079 inst.operands[1].shifted = 1;
10080 inst.operands[1].shift_kind = shift_kind;
10081 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10082 ? T_MNEM_movs : T_MNEM_mov);
10083 inst.instruction |= inst.operands[0].reg << 8;
10084 encode_thumb32_shifted_operand (1);
10085 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10086 inst.reloc.type = BFD_RELOC_UNUSED;
10087 }
10088 }
10089 else
10090 {
10091 if (inst.operands[2].isreg)
10092 {
10093 switch (shift_kind)
10094 {
10095 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10096 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10097 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10098 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10099 default: abort ();
10100 }
10101
10102 inst.instruction |= inst.operands[0].reg;
10103 inst.instruction |= inst.operands[2].reg << 3;
10104 }
10105 else
10106 {
10107 switch (shift_kind)
10108 {
10109 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10110 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10111 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10112 default: abort ();
10113 }
10114 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10115 inst.instruction |= inst.operands[0].reg;
10116 inst.instruction |= inst.operands[1].reg << 3;
10117 }
10118 }
10119 }
10120 else
10121 {
10122 constraint (inst.operands[0].reg > 7
10123 || inst.operands[1].reg > 7, BAD_HIREG);
10124 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10125
10126 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10127 {
10128 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10129 constraint (inst.operands[0].reg != inst.operands[1].reg,
10130 _("source1 and dest must be same register"));
10131
10132 switch (inst.instruction)
10133 {
10134 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10135 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10136 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10137 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10138 default: abort ();
10139 }
10140
10141 inst.instruction |= inst.operands[0].reg;
10142 inst.instruction |= inst.operands[2].reg << 3;
10143 }
10144 else
10145 {
10146 switch (inst.instruction)
10147 {
10148 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10149 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10150 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10151 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10152 default: abort ();
10153 }
10154 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10155 inst.instruction |= inst.operands[0].reg;
10156 inst.instruction |= inst.operands[1].reg << 3;
10157 }
10158 }
10159 }
10160
10161 static void
10162 do_t_simd (void)
10163 {
10164 inst.instruction |= inst.operands[0].reg << 8;
10165 inst.instruction |= inst.operands[1].reg << 16;
10166 inst.instruction |= inst.operands[2].reg;
10167 }
10168
10169 static void
10170 do_t_smc (void)
10171 {
10172 unsigned int value = inst.reloc.exp.X_add_number;
10173 constraint (inst.reloc.exp.X_op != O_constant,
10174 _("expression too complex"));
10175 inst.reloc.type = BFD_RELOC_UNUSED;
10176 inst.instruction |= (value & 0xf000) >> 12;
10177 inst.instruction |= (value & 0x0ff0);
10178 inst.instruction |= (value & 0x000f) << 16;
10179 }
10180
10181 static void
10182 do_t_ssat (void)
10183 {
10184 inst.instruction |= inst.operands[0].reg << 8;
10185 inst.instruction |= inst.operands[1].imm - 1;
10186 inst.instruction |= inst.operands[2].reg << 16;
10187
10188 if (inst.operands[3].present)
10189 {
10190 constraint (inst.reloc.exp.X_op != O_constant,
10191 _("expression too complex"));
10192
10193 if (inst.reloc.exp.X_add_number != 0)
10194 {
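          /* The 5-bit shift amount is split between the imm3 field
             (bits [14:12]) and the imm2 field (bits [7:6]).  */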
10195 if (inst.operands[3].shift_kind == SHIFT_ASR)
10196 inst.instruction |= 0x00200000; /* sh bit */
10197 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10198 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10199 }
10200 inst.reloc.type = BFD_RELOC_UNUSED;
10201 }
10202 }
10203
10204 static void
10205 do_t_ssat16 (void)
10206 {
10207 inst.instruction |= inst.operands[0].reg << 8;
10208 inst.instruction |= inst.operands[1].imm - 1;
10209 inst.instruction |= inst.operands[2].reg << 16;
10210 }
10211
10212 static void
10213 do_t_strex (void)
10214 {
10215 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10216 || inst.operands[2].postind || inst.operands[2].writeback
10217 || inst.operands[2].immisreg || inst.operands[2].shifted
10218 || inst.operands[2].negative,
10219 BAD_ADDR_MODE);
10220
10221 inst.instruction |= inst.operands[0].reg << 8;
10222 inst.instruction |= inst.operands[1].reg << 12;
10223 inst.instruction |= inst.operands[2].reg << 16;
10224 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10225 }
10226
10227 static void
10228 do_t_strexd (void)
10229 {
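  /* If the second transfer register was omitted, it defaults to the register
     after the first (Rt2 == Rt + 1).  */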
10230 if (!inst.operands[2].present)
10231 inst.operands[2].reg = inst.operands[1].reg + 1;
10232
10233 constraint (inst.operands[0].reg == inst.operands[1].reg
10234 || inst.operands[0].reg == inst.operands[2].reg
10235 || inst.operands[0].reg == inst.operands[3].reg
10236 || inst.operands[1].reg == inst.operands[2].reg,
10237 BAD_OVERLAP);
10238
10239 inst.instruction |= inst.operands[0].reg;
10240 inst.instruction |= inst.operands[1].reg << 12;
10241 inst.instruction |= inst.operands[2].reg << 8;
10242 inst.instruction |= inst.operands[3].reg << 16;
10243 }
10244
10245 static void
10246 do_t_sxtah (void)
10247 {
10248 inst.instruction |= inst.operands[0].reg << 8;
10249 inst.instruction |= inst.operands[1].reg << 16;
10250 inst.instruction |= inst.operands[2].reg;
10251 inst.instruction |= inst.operands[3].imm << 4;
10252 }
10253
10254 static void
10255 do_t_sxth (void)
10256 {
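  /* The 16-bit encoding has no rotation field, so it is only usable when the
     rotation is zero or omitted.  */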
10257 if (inst.instruction <= 0xffff && inst.size_req != 4
10258 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10259 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10260 {
10261 inst.instruction = THUMB_OP16 (inst.instruction);
10262 inst.instruction |= inst.operands[0].reg;
10263 inst.instruction |= inst.operands[1].reg << 3;
10264 }
10265 else if (unified_syntax)
10266 {
10267 if (inst.instruction <= 0xffff)
10268 inst.instruction = THUMB_OP32 (inst.instruction);
10269 inst.instruction |= inst.operands[0].reg << 8;
10270 inst.instruction |= inst.operands[1].reg;
10271 inst.instruction |= inst.operands[2].imm << 4;
10272 }
10273 else
10274 {
10275 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10276 _("Thumb encoding does not support rotation"));
10277 constraint (1, BAD_HIREG);
10278 }
10279 }
10280
10281 static void
10282 do_t_swi (void)
10283 {
10284 inst.reloc.type = BFD_RELOC_ARM_SWI;
10285 }
10286
10287 static void
10288 do_t_tb (void)
10289 {
10290 int half;
10291
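  /* Bit 4 distinguishes TBH (halfword table, with an LSL #1 index) from TBB;
     only TBH allows a shifted index.  */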
10292 half = (inst.instruction & 0x10) != 0;
10293 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10294 constraint (inst.operands[0].immisreg,
10295 _("instruction requires register index"));
10296 constraint (inst.operands[0].imm == 15,
10297 _("PC is not a valid index register"));
10298 constraint (!half && inst.operands[0].shifted,
10299 _("instruction does not allow shifted index"));
10300 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10301 }
10302
10303 static void
10304 do_t_usat (void)
10305 {
10306 inst.instruction |= inst.operands[0].reg << 8;
10307 inst.instruction |= inst.operands[1].imm;
10308 inst.instruction |= inst.operands[2].reg << 16;
10309
10310 if (inst.operands[3].present)
10311 {
10312 constraint (inst.reloc.exp.X_op != O_constant,
10313 _("expression too complex"));
10314 if (inst.reloc.exp.X_add_number != 0)
10315 {
10316 if (inst.operands[3].shift_kind == SHIFT_ASR)
10317 inst.instruction |= 0x00200000; /* sh bit */
10318
10319 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10320 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10321 }
10322 inst.reloc.type = BFD_RELOC_UNUSED;
10323 }
10324 }
10325
10326 static void
10327 do_t_usat16 (void)
10328 {
10329 inst.instruction |= inst.operands[0].reg << 8;
10330 inst.instruction |= inst.operands[1].imm;
10331 inst.instruction |= inst.operands[2].reg << 16;
10332 }
10333
10334 /* Neon instruction encoder helpers. */
10335
10336 /* Encodings for the different types for various Neon opcodes. */
10337
10338 /* An "invalid" code for the following tables. */
10339 #define N_INV -1u
10340
10341 struct neon_tab_entry
10342 {
10343 unsigned integer;
10344 unsigned float_or_poly;
10345 unsigned scalar_or_imm;
10346 };
10347
10348 /* Map overloaded Neon opcodes to their respective encodings. */
10349 #define NEON_ENC_TAB \
10350 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10351 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10352 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10353 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10354 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10355 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10356 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10357 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10358 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10359 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10360 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10361 /* Register variants of the following two instructions are encoded as
10362 vcge / vcgt with the operands reversed. */ \
10363 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10364 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10365 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10366 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10367 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10368 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10369 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10370 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10371 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10372 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10373 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10374 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10375 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10376 X(vshl, 0x0000400, N_INV, 0x0800510), \
10377 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10378 X(vand, 0x0000110, N_INV, 0x0800030), \
10379 X(vbic, 0x0100110, N_INV, 0x0800030), \
10380 X(veor, 0x1000110, N_INV, N_INV), \
10381 X(vorn, 0x0300110, N_INV, 0x0800010), \
10382 X(vorr, 0x0200110, N_INV, 0x0800010), \
10383 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10384 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10385 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10386 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10387 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10388 X(vst1, 0x0000000, 0x0800000, N_INV), \
10389 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10390 X(vst2, 0x0000100, 0x0800100, N_INV), \
10391 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10392 X(vst3, 0x0000200, 0x0800200, N_INV), \
10393 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10394 X(vst4, 0x0000300, 0x0800300, N_INV), \
10395 X(vmovn, 0x1b20200, N_INV, N_INV), \
10396 X(vtrn, 0x1b20080, N_INV, N_INV), \
10397 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10398 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10399 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10400 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10401 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10402 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10403 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10404 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10405 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10406
10407 enum neon_opc
10408 {
10409 #define X(OPC,I,F,S) N_MNEM_##OPC
10410 NEON_ENC_TAB
10411 #undef X
10412 };
10413
10414 static const struct neon_tab_entry neon_enc_tab[] =
10415 {
10416 #define X(OPC,I,F,S) { (I), (F), (S) }
10417 NEON_ENC_TAB
10418 #undef X
10419 };
10420
10421 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10422 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10423 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10424 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10425 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10426 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10427 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10428 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10429 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
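/* For VFP-style insns, keep the top nibble of X (normally the condition field)
   and take the rest of the encoding from the table.  */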
10430 #define NEON_ENC_SINGLE(X) \
10431 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10432 #define NEON_ENC_DOUBLE(X) \
10433 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10434
10435 /* Define shapes for instruction operands. The following mnemonic characters
10436 are used in this table:
10437
10438 F - VFP S<n> register
10439 D - Neon D<n> register
10440 Q - Neon Q<n> register
10441 I - Immediate
10442 S - Scalar
10443 R - ARM register
10444 L - D<n> register list
10445
10446 This table is used to generate various data:
10447 - enumerations of the form NS_DDR to be used as arguments to
10448 neon_select_shape.
10449 - a table classifying shapes into single, double, quad, mixed.
10450 - a table used to drive neon_select_shape.
10451 */
10452
10453 #define NEON_SHAPE_DEF \
10454 X(3, (D, D, D), DOUBLE), \
10455 X(3, (Q, Q, Q), QUAD), \
10456 X(3, (D, D, I), DOUBLE), \
10457 X(3, (Q, Q, I), QUAD), \
10458 X(3, (D, D, S), DOUBLE), \
10459 X(3, (Q, Q, S), QUAD), \
10460 X(2, (D, D), DOUBLE), \
10461 X(2, (Q, Q), QUAD), \
10462 X(2, (D, S), DOUBLE), \
10463 X(2, (Q, S), QUAD), \
10464 X(2, (D, R), DOUBLE), \
10465 X(2, (Q, R), QUAD), \
10466 X(2, (D, I), DOUBLE), \
10467 X(2, (Q, I), QUAD), \
10468 X(3, (D, L, D), DOUBLE), \
10469 X(2, (D, Q), MIXED), \
10470 X(2, (Q, D), MIXED), \
10471 X(3, (D, Q, I), MIXED), \
10472 X(3, (Q, D, I), MIXED), \
10473 X(3, (Q, D, D), MIXED), \
10474 X(3, (D, Q, Q), MIXED), \
10475 X(3, (Q, Q, D), MIXED), \
10476 X(3, (Q, D, S), MIXED), \
10477 X(3, (D, Q, S), MIXED), \
10478 X(4, (D, D, D, I), DOUBLE), \
10479 X(4, (Q, Q, Q, I), QUAD), \
10480 X(2, (F, F), SINGLE), \
10481 X(3, (F, F, F), SINGLE), \
10482 X(2, (F, I), SINGLE), \
10483 X(2, (F, D), MIXED), \
10484 X(2, (D, F), MIXED), \
10485 X(3, (F, F, I), MIXED), \
10486 X(4, (R, R, F, F), SINGLE), \
10487 X(4, (F, F, R, R), SINGLE), \
10488 X(3, (D, R, R), DOUBLE), \
10489 X(3, (R, R, D), DOUBLE), \
10490 X(2, (S, R), SINGLE), \
10491 X(2, (R, S), SINGLE), \
10492 X(2, (F, R), SINGLE), \
10493 X(2, (R, F), SINGLE)
10494
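/* Expand NEON_SHAPE_DEF into the shape enumerators, e.g. NS_DDD, NS_QQI.  */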
10495 #define S2(A,B) NS_##A##B
10496 #define S3(A,B,C) NS_##A##B##C
10497 #define S4(A,B,C,D) NS_##A##B##C##D
10498
10499 #define X(N, L, C) S##N L
10500
10501 enum neon_shape
10502 {
10503 NEON_SHAPE_DEF,
10504 NS_NULL
10505 };
10506
10507 #undef X
10508 #undef S2
10509 #undef S3
10510 #undef S4
10511
10512 enum neon_shape_class
10513 {
10514 SC_SINGLE,
10515 SC_DOUBLE,
10516 SC_QUAD,
10517 SC_MIXED
10518 };
10519
10520 #define X(N, L, C) SC_##C
10521
10522 static enum neon_shape_class neon_shape_class[] =
10523 {
10524 NEON_SHAPE_DEF
10525 };
10526
10527 #undef X
10528
10529 enum neon_shape_el
10530 {
10531 SE_F,
10532 SE_D,
10533 SE_Q,
10534 SE_I,
10535 SE_S,
10536 SE_R,
10537 SE_L
10538 };
10539
10540 /* Register widths of above. */
10541 static unsigned neon_shape_el_size[] =
10542 {
10543 32,
10544 64,
10545 128,
10546 0,
10547 32,
10548 32,
10549 0
10550 };
10551
10552 struct neon_shape_info
10553 {
10554 unsigned els;
10555 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10556 };
10557
10558 #define S2(A,B) { SE_##A, SE_##B }
10559 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10560 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10561
10562 #define X(N, L, C) { N, S##N L }
10563
10564 static struct neon_shape_info neon_shape_tab[] =
10565 {
10566 NEON_SHAPE_DEF
10567 };
10568
10569 #undef X
10570 #undef S2
10571 #undef S3
10572 #undef S4
10573
10574 /* Bit masks used in type checking given instructions.
10575    'N_EQK' means the type must be the same as (or derived from) the key
10576 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10577 set, various other bits can be set as well in order to modify the meaning of
10578 the type constraint. */
10579
10580 enum neon_type_mask
10581 {
10582 N_S8 = 0x000001,
10583 N_S16 = 0x000002,
10584 N_S32 = 0x000004,
10585 N_S64 = 0x000008,
10586 N_U8 = 0x000010,
10587 N_U16 = 0x000020,
10588 N_U32 = 0x000040,
10589 N_U64 = 0x000080,
10590 N_I8 = 0x000100,
10591 N_I16 = 0x000200,
10592 N_I32 = 0x000400,
10593 N_I64 = 0x000800,
10594 N_8 = 0x001000,
10595 N_16 = 0x002000,
10596 N_32 = 0x004000,
10597 N_64 = 0x008000,
10598 N_P8 = 0x010000,
10599 N_P16 = 0x020000,
10600 N_F32 = 0x040000,
10601 N_F64 = 0x080000,
10602 N_KEY = 0x100000, /* key element (main type specifier). */
10603 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10604 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10605 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10606 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10607 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10608 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10609 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10610 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10611 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10612 N_UTYP = 0,
10613 N_MAX_NONSPECIAL = N_F64
10614 };
10615
10616 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10617
10618 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10619 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10620 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10621 #define N_SUF_32 (N_SU_32 | N_F32)
10622 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10623 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10624
10625 /* Pass this as the first type argument to neon_check_type to ignore types
10626 altogether. */
10627 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10628
10629 /* Select a "shape" for the current instruction (describing register types or
10630 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10631    doesn't fit. For non-polymorphic shapes, checking is usually done as part
10632    of operand parsing, so this function doesn't need to be called.
10633 Shapes should be listed in order of decreasing length. */
10634
10635 static enum neon_shape
10636 neon_select_shape (enum neon_shape shape, ...)
10637 {
10638 va_list ap;
10639 enum neon_shape first_shape = shape;
10640
10641 /* Fix missing optional operands. FIXME: we don't know at this point how
10642 many arguments we should have, so this makes the assumption that we have
10643 > 1. This is true of all current Neon opcodes, I think, but may not be
10644 true in the future. */
10645 if (!inst.operands[1].present)
10646 inst.operands[1] = inst.operands[0];
10647
10648 va_start (ap, shape);
10649
10650 for (; shape != NS_NULL; shape = va_arg (ap, int))
10651 {
10652 unsigned j;
10653 int matches = 1;
10654
10655 for (j = 0; j < neon_shape_tab[shape].els; j++)
10656 {
10657 if (!inst.operands[j].present)
10658 {
10659 matches = 0;
10660 break;
10661 }
10662
10663 switch (neon_shape_tab[shape].el[j])
10664 {
10665 case SE_F:
10666 if (!(inst.operands[j].isreg
10667 && inst.operands[j].isvec
10668 && inst.operands[j].issingle
10669 && !inst.operands[j].isquad))
10670 matches = 0;
10671 break;
10672
10673 case SE_D:
10674 if (!(inst.operands[j].isreg
10675 && inst.operands[j].isvec
10676 && !inst.operands[j].isquad
10677 && !inst.operands[j].issingle))
10678 matches = 0;
10679 break;
10680
10681 case SE_R:
10682 if (!(inst.operands[j].isreg
10683 && !inst.operands[j].isvec))
10684 matches = 0;
10685 break;
10686
10687 case SE_Q:
10688 if (!(inst.operands[j].isreg
10689 && inst.operands[j].isvec
10690 && inst.operands[j].isquad
10691 && !inst.operands[j].issingle))
10692 matches = 0;
10693 break;
10694
10695 case SE_I:
10696 if (!(!inst.operands[j].isreg
10697 && !inst.operands[j].isscalar))
10698 matches = 0;
10699 break;
10700
10701 case SE_S:
10702 if (!(!inst.operands[j].isreg
10703 && inst.operands[j].isscalar))
10704 matches = 0;
10705 break;
10706
10707 case SE_L:
10708 break;
10709 }
10710 }
10711 if (matches)
10712 break;
10713 }
10714
10715 va_end (ap);
10716
10717 if (shape == NS_NULL && first_shape != NS_NULL)
10718 first_error (_("invalid instruction shape"));
10719
10720 return shape;
10721 }
10722
10723 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10724 means the Q bit should be set). */
10725
10726 static int
10727 neon_quad (enum neon_shape shape)
10728 {
10729 return neon_shape_class[shape] == SC_QUAD;
10730 }
10731
10732 static void
10733 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10734 unsigned *g_size)
10735 {
10736 /* Allow modification to be made to types which are constrained to be
10737 based on the key element, based on bits set alongside N_EQK. */
10738 if ((typebits & N_EQK) != 0)
10739 {
10740 if ((typebits & N_HLF) != 0)
10741 *g_size /= 2;
10742 else if ((typebits & N_DBL) != 0)
10743 *g_size *= 2;
10744 if ((typebits & N_SGN) != 0)
10745 *g_type = NT_signed;
10746 else if ((typebits & N_UNS) != 0)
10747 *g_type = NT_unsigned;
10748 else if ((typebits & N_INT) != 0)
10749 *g_type = NT_integer;
10750 else if ((typebits & N_FLT) != 0)
10751 *g_type = NT_float;
10752 else if ((typebits & N_SIZ) != 0)
10753 *g_type = NT_untyped;
10754 }
10755 }
10756
10757 /* Return a copy of KEY promoted by the bits set in THISARG. KEY should be the
10758    "key" operand type, i.e. the single type specified in a Neon instruction when
10759    it is the only one given. */
10760
10761 static struct neon_type_el
10762 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10763 {
10764 struct neon_type_el dest = *key;
10765
10766 assert ((thisarg & N_EQK) != 0);
10767
10768 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10769
10770 return dest;
10771 }
10772
10773 /* Convert Neon type and size into compact bitmask representation. */
10774
10775 static enum neon_type_mask
10776 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10777 {
10778 switch (type)
10779 {
10780 case NT_untyped:
10781 switch (size)
10782 {
10783 case 8: return N_8;
10784 case 16: return N_16;
10785 case 32: return N_32;
10786 case 64: return N_64;
10787 default: ;
10788 }
10789 break;
10790
10791 case NT_integer:
10792 switch (size)
10793 {
10794 case 8: return N_I8;
10795 case 16: return N_I16;
10796 case 32: return N_I32;
10797 case 64: return N_I64;
10798 default: ;
10799 }
10800 break;
10801
10802 case NT_float:
10803 switch (size)
10804 {
10805 case 32: return N_F32;
10806 case 64: return N_F64;
10807 default: ;
10808 }
10809 break;
10810
10811 case NT_poly:
10812 switch (size)
10813 {
10814 case 8: return N_P8;
10815 case 16: return N_P16;
10816 default: ;
10817 }
10818 break;
10819
10820 case NT_signed:
10821 switch (size)
10822 {
10823 case 8: return N_S8;
10824 case 16: return N_S16;
10825 case 32: return N_S32;
10826 case 64: return N_S64;
10827 default: ;
10828 }
10829 break;
10830
10831 case NT_unsigned:
10832 switch (size)
10833 {
10834 case 8: return N_U8;
10835 case 16: return N_U16;
10836 case 32: return N_U32;
10837 case 64: return N_U64;
10838 default: ;
10839 }
10840 break;
10841
10842 default: ;
10843 }
10844
10845 return N_UTYP;
10846 }
10847
10848 /* Convert compact Neon bitmask type representation to a type and size. Only
10849 handles the case where a single bit is set in the mask. */
10850
10851 static int
10852 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10853 enum neon_type_mask mask)
10854 {
10855 if ((mask & N_EQK) != 0)
10856 return FAIL;
10857
10858 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10859 *size = 8;
10860 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10861 *size = 16;
10862 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10863 *size = 32;
10864 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10865 *size = 64;
10866 else
10867 return FAIL;
10868
10869 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10870 *type = NT_signed;
10871 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10872 *type = NT_unsigned;
10873 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10874 *type = NT_integer;
10875 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10876 *type = NT_untyped;
10877 else if ((mask & (N_P8 | N_P16)) != 0)
10878 *type = NT_poly;
10879 else if ((mask & (N_F32 | N_F64)) != 0)
10880 *type = NT_float;
10881 else
10882 return FAIL;
10883
10884 return SUCCESS;
10885 }
10886
10887 /* Modify a bitmask of allowed types. This is only needed for type
10888 relaxation. */
10889
10890 static unsigned
10891 modify_types_allowed (unsigned allowed, unsigned mods)
10892 {
10893 unsigned size;
10894 enum neon_el_type type;
10895 unsigned destmask;
10896 int i;
10897
10898 destmask = 0;
10899
10900 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10901 {
10902 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10903 {
10904 neon_modify_type_size (mods, &type, &size);
10905 destmask |= type_chk_of_el_type (type, size);
10906 }
10907 }
10908
10909 return destmask;
10910 }
10911
10912 /* Check type and return type classification.
10913 The manual states (paraphrase): If one datatype is given, it indicates the
10914 type given in:
10915 - the second operand, if there is one
10916 - the operand, if there is no second operand
10917 - the result, if there are no operands.
10918    This isn't quite good enough though, so we use the concept of a "key" datatype,
10919    set on a per-instruction basis, which is the one that matters when
10920 only one data type is written.
10921 Note: this function has side-effects (e.g. filling in missing operands). All
10922 Neon instructions should call it before performing bit encoding. */
10923
10924 static struct neon_type_el
10925 neon_check_type (unsigned els, enum neon_shape ns, ...)
10926 {
10927 va_list ap;
10928 unsigned i, pass, key_el = 0;
10929 unsigned types[NEON_MAX_TYPE_ELS];
10930 enum neon_el_type k_type = NT_invtype;
10931 unsigned k_size = -1u;
10932 struct neon_type_el badtype = {NT_invtype, -1};
10933 unsigned key_allowed = 0;
10934
10935   /* An optional register in a Neon instruction is always operand 1.
10936      Fill in the missing operand here, if it was omitted. */
10937 if (els > 1 && !inst.operands[1].present)
10938 inst.operands[1] = inst.operands[0];
10939
10940 /* Suck up all the varargs. */
10941 va_start (ap, ns);
10942 for (i = 0; i < els; i++)
10943 {
10944 unsigned thisarg = va_arg (ap, unsigned);
10945 if (thisarg == N_IGNORE_TYPE)
10946 {
10947 va_end (ap);
10948 return badtype;
10949 }
10950 types[i] = thisarg;
10951 if ((thisarg & N_KEY) != 0)
10952 key_el = i;
10953 }
10954 va_end (ap);
10955
10956 if (inst.vectype.elems > 0)
10957 for (i = 0; i < els; i++)
10958 if (inst.operands[i].vectype.type != NT_invtype)
10959 {
10960 first_error (_("types specified in both the mnemonic and operands"));
10961 return badtype;
10962 }
10963
10964 /* Duplicate inst.vectype elements here as necessary.
10965 FIXME: No idea if this is exactly the same as the ARM assembler,
10966 particularly when an insn takes one register and one non-register
10967 operand. */
10968 if (inst.vectype.elems == 1 && els > 1)
10969 {
10970 unsigned j;
10971 inst.vectype.elems = els;
10972 inst.vectype.el[key_el] = inst.vectype.el[0];
10973 for (j = 0; j < els; j++)
10974 if (j != key_el)
10975 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10976 types[j]);
10977 }
10978 else if (inst.vectype.elems == 0 && els > 0)
10979 {
10980 unsigned j;
10981 /* No types were given after the mnemonic, so look for types specified
10982 after each operand. We allow some flexibility here; as long as the
10983 "key" operand has a type, we can infer the others. */
10984 for (j = 0; j < els; j++)
10985 if (inst.operands[j].vectype.type != NT_invtype)
10986 inst.vectype.el[j] = inst.operands[j].vectype;
10987
10988 if (inst.operands[key_el].vectype.type != NT_invtype)
10989 {
10990 for (j = 0; j < els; j++)
10991 if (inst.operands[j].vectype.type == NT_invtype)
10992 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10993 types[j]);
10994 }
10995 else
10996 {
10997 first_error (_("operand types can't be inferred"));
10998 return badtype;
10999 }
11000 }
11001 else if (inst.vectype.elems != els)
11002 {
11003 first_error (_("type specifier has the wrong number of parts"));
11004 return badtype;
11005 }
11006
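  /* Pass 0 only records the key operand's type, size and allowed types;
     pass 1 checks the remaining operands against the key (and, for N_VFP
     operands, against the register width).  */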
11007 for (pass = 0; pass < 2; pass++)
11008 {
11009 for (i = 0; i < els; i++)
11010 {
11011 unsigned thisarg = types[i];
11012 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
11013 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
11014 enum neon_el_type g_type = inst.vectype.el[i].type;
11015 unsigned g_size = inst.vectype.el[i].size;
11016
11017 /* Decay more-specific signed & unsigned types to sign-insensitive
11018 integer types if sign-specific variants are unavailable. */
11019 if ((g_type == NT_signed || g_type == NT_unsigned)
11020 && (types_allowed & N_SU_ALL) == 0)
11021 g_type = NT_integer;
11022
11023 /* If only untyped args are allowed, decay any more specific types to
11024 them. Some instructions only care about signs for some element
11025 sizes, so handle that properly. */
11026 if ((g_size == 8 && (types_allowed & N_8) != 0)
11027 || (g_size == 16 && (types_allowed & N_16) != 0)
11028 || (g_size == 32 && (types_allowed & N_32) != 0)
11029 || (g_size == 64 && (types_allowed & N_64) != 0))
11030 g_type = NT_untyped;
11031
11032 if (pass == 0)
11033 {
11034 if ((thisarg & N_KEY) != 0)
11035 {
11036 k_type = g_type;
11037 k_size = g_size;
11038 key_allowed = thisarg & ~N_KEY;
11039 }
11040 }
11041 else
11042 {
11043 if ((thisarg & N_VFP) != 0)
11044 {
11045 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11046 unsigned regwidth = neon_shape_el_size[regshape], match;
11047
11048 /* In VFP mode, operands must match register widths. If we
11049 have a key operand, use its width, else use the width of
11050 the current operand. */
11051 if (k_size != -1u)
11052 match = k_size;
11053 else
11054 match = g_size;
11055
11056 if (regwidth != match)
11057 {
11058 first_error (_("operand size must match register width"));
11059 return badtype;
11060 }
11061 }
11062
11063 if ((thisarg & N_EQK) == 0)
11064 {
11065 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11066
11067 if ((given_type & types_allowed) == 0)
11068 {
11069 first_error (_("bad type in Neon instruction"));
11070 return badtype;
11071 }
11072 }
11073 else
11074 {
11075 enum neon_el_type mod_k_type = k_type;
11076 unsigned mod_k_size = k_size;
11077 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11078 if (g_type != mod_k_type || g_size != mod_k_size)
11079 {
11080 first_error (_("inconsistent types in Neon instruction"));
11081 return badtype;
11082 }
11083 }
11084 }
11085 }
11086 }
11087
11088 return inst.vectype.el[key_el];
11089 }
11090
11091 /* Neon-style VFP instruction forwarding. */
11092
11093 /* Thumb VFP instructions have 0xE in the condition field. */
11094
11095 static void
11096 do_vfp_cond_or_thumb (void)
11097 {
11098 if (thumb_mode)
11099 inst.instruction |= 0xe0000000;
11100 else
11101 inst.instruction |= inst.cond << 28;
11102 }
11103
11104 /* Look up and encode a simple mnemonic, for use as a helper function for the
11105 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11106 etc. It is assumed that operand parsing has already been done, and that the
11107 operands are in the form expected by the given opcode (this isn't necessarily
11108 the same as the form in which they were parsed, hence some massaging must
11109 take place before this function is called).
11110 Checks current arch version against that in the looked-up opcode. */
11111
11112 static void
11113 do_vfp_nsyn_opcode (const char *opname)
11114 {
11115 const struct asm_opcode *opcode;
11116
11117 opcode = hash_find (arm_ops_hsh, opname);
11118
11119 if (!opcode)
11120 abort ();
11121
11122 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11123 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11124 _(BAD_FPU));
11125
11126 if (thumb_mode)
11127 {
11128 inst.instruction = opcode->tvalue;
11129 opcode->tencode ();
11130 }
11131 else
11132 {
11133 inst.instruction = (inst.cond << 28) | opcode->avalue;
11134 opcode->aencode ();
11135 }
11136 }
11137
11138 static void
11139 do_vfp_nsyn_add_sub (enum neon_shape rs)
11140 {
11141 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11142
11143 if (rs == NS_FFF)
11144 {
11145 if (is_add)
11146 do_vfp_nsyn_opcode ("fadds");
11147 else
11148 do_vfp_nsyn_opcode ("fsubs");
11149 }
11150 else
11151 {
11152 if (is_add)
11153 do_vfp_nsyn_opcode ("faddd");
11154 else
11155 do_vfp_nsyn_opcode ("fsubd");
11156 }
11157 }
11158
11159 /* Check operand types to see if this is a VFP instruction, and if so call
11160 PFN (). */
11161
11162 static int
11163 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11164 {
11165 enum neon_shape rs;
11166 struct neon_type_el et;
11167
11168 switch (args)
11169 {
11170 case 2:
11171 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11172 et = neon_check_type (2, rs,
11173 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11174 break;
11175
11176 case 3:
11177 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11178 et = neon_check_type (3, rs,
11179 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11180 break;
11181
11182 default:
11183 abort ();
11184 }
11185
11186 if (et.type != NT_invtype)
11187 {
11188 pfn (rs);
11189 return SUCCESS;
11190 }
11191 else
11192 inst.error = NULL;
11193
11194 return FAIL;
11195 }
11196
11197 static void
11198 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11199 {
11200 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11201
11202 if (rs == NS_FFF)
11203 {
11204 if (is_mla)
11205 do_vfp_nsyn_opcode ("fmacs");
11206 else
11207 do_vfp_nsyn_opcode ("fmscs");
11208 }
11209 else
11210 {
11211 if (is_mla)
11212 do_vfp_nsyn_opcode ("fmacd");
11213 else
11214 do_vfp_nsyn_opcode ("fmscd");
11215 }
11216 }
11217
11218 static void
11219 do_vfp_nsyn_mul (enum neon_shape rs)
11220 {
11221 if (rs == NS_FFF)
11222 do_vfp_nsyn_opcode ("fmuls");
11223 else
11224 do_vfp_nsyn_opcode ("fmuld");
11225 }
11226
11227 static void
11228 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11229 {
11230 int is_neg = (inst.instruction & 0x80) != 0;
11231 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11232
11233 if (rs == NS_FF)
11234 {
11235 if (is_neg)
11236 do_vfp_nsyn_opcode ("fnegs");
11237 else
11238 do_vfp_nsyn_opcode ("fabss");
11239 }
11240 else
11241 {
11242 if (is_neg)
11243 do_vfp_nsyn_opcode ("fnegd");
11244 else
11245 do_vfp_nsyn_opcode ("fabsd");
11246 }
11247 }
11248
11249 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double-precision
11250    insns are handled by the Neon-syntax code elsewhere. */
11251
11252 static void
11253 do_vfp_nsyn_ldm_stm (int is_dbmode)
11254 {
11255 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11256 if (is_ldm)
11257 {
11258 if (is_dbmode)
11259 do_vfp_nsyn_opcode ("fldmdbs");
11260 else
11261 do_vfp_nsyn_opcode ("fldmias");
11262 }
11263 else
11264 {
11265 if (is_dbmode)
11266 do_vfp_nsyn_opcode ("fstmdbs");
11267 else
11268 do_vfp_nsyn_opcode ("fstmias");
11269 }
11270 }
11271
11272 static void
11273 do_vfp_nsyn_sqrt (void)
11274 {
11275 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11276 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11277
11278 if (rs == NS_FF)
11279 do_vfp_nsyn_opcode ("fsqrts");
11280 else
11281 do_vfp_nsyn_opcode ("fsqrtd");
11282 }
11283
11284 static void
11285 do_vfp_nsyn_div (void)
11286 {
11287 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11288 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11289 N_F32 | N_F64 | N_KEY | N_VFP);
11290
11291 if (rs == NS_FFF)
11292 do_vfp_nsyn_opcode ("fdivs");
11293 else
11294 do_vfp_nsyn_opcode ("fdivd");
11295 }
11296
11297 static void
11298 do_vfp_nsyn_nmul (void)
11299 {
11300 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11301 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11302 N_F32 | N_F64 | N_KEY | N_VFP);
11303
11304 if (rs == NS_FFF)
11305 {
11306 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11307 do_vfp_sp_dyadic ();
11308 }
11309 else
11310 {
11311 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11312 do_vfp_dp_rd_rn_rm ();
11313 }
11314 do_vfp_cond_or_thumb ();
11315 }
11316
11317 static void
11318 do_vfp_nsyn_cmp (void)
11319 {
11320 if (inst.operands[1].isreg)
11321 {
11322 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11323 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11324
11325 if (rs == NS_FF)
11326 {
11327 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11328 do_vfp_sp_monadic ();
11329 }
11330 else
11331 {
11332 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11333 do_vfp_dp_rd_rm ();
11334 }
11335 }
11336 else
11337 {
11338 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11339 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11340
11341 switch (inst.instruction & 0x0fffffff)
11342 {
11343 case N_MNEM_vcmp:
11344 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11345 break;
11346 case N_MNEM_vcmpe:
11347 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11348 break;
11349 default:
11350 abort ();
11351 }
11352
11353 if (rs == NS_FI)
11354 {
11355 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11356 do_vfp_sp_compare_z ();
11357 }
11358 else
11359 {
11360 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11361 do_vfp_dp_rd ();
11362 }
11363 }
11364 do_vfp_cond_or_thumb ();
11365 }
11366
11367 static void
11368 nsyn_insert_sp (void)
11369 {
11370 inst.operands[1] = inst.operands[0];
11371 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11372 inst.operands[0].reg = 13;
11373 inst.operands[0].isreg = 1;
11374 inst.operands[0].writeback = 1;
11375 inst.operands[0].present = 1;
11376 }
11377
11378 static void
11379 do_vfp_nsyn_push (void)
11380 {
11381 nsyn_insert_sp ();
11382 if (inst.operands[1].issingle)
11383 do_vfp_nsyn_opcode ("fstmdbs");
11384 else
11385 do_vfp_nsyn_opcode ("fstmdbd");
11386 }
11387
11388 static void
11389 do_vfp_nsyn_pop (void)
11390 {
11391 nsyn_insert_sp ();
11392 if (inst.operands[1].issingle)
11393 do_vfp_nsyn_opcode ("fldmias");
11394 else
11395 do_vfp_nsyn_opcode ("fldmiad");
11396 }
11397
11398 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11399 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11400
11401 static unsigned
11402 neon_dp_fixup (unsigned i)
11403 {
11404 if (thumb_mode)
11405 {
11406 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11407 if (i & (1 << 24))
11408 i |= 1 << 28;
11409
11410 i &= ~(1 << 24);
11411
11412 i |= 0xef000000;
11413 }
11414 else
11415 i |= 0xf2000000;
11416
11417 return i;
11418 }
11419
11420 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11421 (0, 1, 2, 3). */
11422
11423 static unsigned
11424 neon_logbits (unsigned x)
11425 {
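  /* E.g. neon_logbits (32) == ffs (32) - 4 == 6 - 4 == 2.  */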
11426 return ffs (x) - 4;
11427 }
11428
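/* Split a Neon D/Q register number into its low four bits (the Vd/Vn/Vm
   fields) and its high bit (the D/N/M fields).  */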
11429 #define LOW4(R) ((R) & 0xf)
11430 #define HI1(R) (((R) >> 4) & 1)
11431
11432 /* Encode insns with bit pattern:
11433
11434 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11435 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11436
11437 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11438 different meaning for some instruction. */
11439
11440 static void
11441 neon_three_same (int isquad, int ubit, int size)
11442 {
11443 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11444 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11445 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11446 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11447 inst.instruction |= LOW4 (inst.operands[2].reg);
11448 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11449 inst.instruction |= (isquad != 0) << 6;
11450 inst.instruction |= (ubit != 0) << 24;
11451 if (size != -1)
11452 inst.instruction |= neon_logbits (size) << 20;
11453
11454 inst.instruction = neon_dp_fixup (inst.instruction);
11455 }
11456
11457 /* Encode instructions of the form:
11458
11459 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11460 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11461
11462 Don't write size if SIZE == -1. */
11463
11464 static void
11465 neon_two_same (int qbit, int ubit, int size)
11466 {
11467 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11468 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11469 inst.instruction |= LOW4 (inst.operands[1].reg);
11470 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11471 inst.instruction |= (qbit != 0) << 6;
11472 inst.instruction |= (ubit != 0) << 24;
11473
11474 if (size != -1)
11475 inst.instruction |= neon_logbits (size) << 18;
11476
11477 inst.instruction = neon_dp_fixup (inst.instruction);
11478 }
11479
11480 /* Neon instruction encoders, in approximate order of appearance. */
11481
11482 static void
11483 do_neon_dyadic_i_su (void)
11484 {
11485 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11486 struct neon_type_el et = neon_check_type (3, rs,
11487 N_EQK, N_EQK, N_SU_32 | N_KEY);
11488 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11489 }
11490
11491 static void
11492 do_neon_dyadic_i64_su (void)
11493 {
11494 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11495 struct neon_type_el et = neon_check_type (3, rs,
11496 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11497 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11498 }
11499
11500 static void
11501 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11502 unsigned immbits)
11503 {
11504 unsigned size = et.size >> 3;
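  /* The element size is marked by a single bit above the shift count:
     bit 19 for 8-bit, bit 20 for 16-bit, bit 21 for 32-bit elements, and the
     L bit (bit 7) for 64-bit elements.  */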
11505 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11506 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11507 inst.instruction |= LOW4 (inst.operands[1].reg);
11508 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11509 inst.instruction |= (isquad != 0) << 6;
11510 inst.instruction |= immbits << 16;
11511 inst.instruction |= (size >> 3) << 7;
11512 inst.instruction |= (size & 0x7) << 19;
11513 if (write_ubit)
11514 inst.instruction |= (uval != 0) << 24;
11515
11516 inst.instruction = neon_dp_fixup (inst.instruction);
11517 }
11518
11519 static void
11520 do_neon_shl_imm (void)
11521 {
11522 if (!inst.operands[2].isreg)
11523 {
11524 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11525 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11526 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11527 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11528 }
11529 else
11530 {
11531 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11532 struct neon_type_el et = neon_check_type (3, rs,
11533 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11534 unsigned int tmp;
11535
11536 /* VSHL/VQSHL 3-register variants have syntax such as:
11537 vshl.xx Dd, Dm, Dn
11538 whereas other 3-register operations encoded by neon_three_same have
11539 syntax like:
11540 vadd.xx Dd, Dn, Dm
11541 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11542 here. */
11543 tmp = inst.operands[2].reg;
11544 inst.operands[2].reg = inst.operands[1].reg;
11545 inst.operands[1].reg = tmp;
11546 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11547 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11548 }
11549 }
11550
11551 static void
11552 do_neon_qshl_imm (void)
11553 {
11554 if (!inst.operands[2].isreg)
11555 {
11556 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11557 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11558
11559 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11560 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11561 inst.operands[2].imm);
11562 }
11563 else
11564 {
11565 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11566 struct neon_type_el et = neon_check_type (3, rs,
11567 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11568 unsigned int tmp;
11569
11570 /* See note in do_neon_shl_imm. */
11571 tmp = inst.operands[2].reg;
11572 inst.operands[2].reg = inst.operands[1].reg;
11573 inst.operands[1].reg = tmp;
11574 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11575 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11576 }
11577 }
11578
11579 static void
11580 do_neon_rshl (void)
11581 {
11582 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11583 struct neon_type_el et = neon_check_type (3, rs,
11584 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11585 unsigned int tmp;
11586
11587 tmp = inst.operands[2].reg;
11588 inst.operands[2].reg = inst.operands[1].reg;
11589 inst.operands[1].reg = tmp;
11590 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11591 }
11592
11593 static int
11594 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11595 {
11596 /* Handle .I8 pseudo-instructions. */
11597 if (size == 8)
11598 {
11599 /* Unfortunately, this will make everything apart from zero out-of-range.
11600          FIXME: is this the intended semantics? There doesn't seem much point in
11601 accepting .I8 if so. */
11602 immediate |= immediate << 8;
11603 size = 16;
11604 }
11605
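  /* The cmode values returned below follow the Advanced SIMD modified-immediate
     encoding: 0x1, 0x3, 0x5 and 0x7 place the byte in bits [7:0], [15:8],
     [23:16] and [31:24] of a 32-bit element; 0x9 and 0xb place it in bits
     [7:0] and [15:8] of a 16-bit element.  */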
11606 if (size >= 32)
11607 {
11608 if (immediate == (immediate & 0x000000ff))
11609 {
11610 *immbits = immediate;
11611 return 0x1;
11612 }
11613 else if (immediate == (immediate & 0x0000ff00))
11614 {
11615 *immbits = immediate >> 8;
11616 return 0x3;
11617 }
11618 else if (immediate == (immediate & 0x00ff0000))
11619 {
11620 *immbits = immediate >> 16;
11621 return 0x5;
11622 }
11623 else if (immediate == (immediate & 0xff000000))
11624 {
11625 *immbits = immediate >> 24;
11626 return 0x7;
11627 }
11628 if ((immediate & 0xffff) != (immediate >> 16))
11629 goto bad_immediate;
11630 immediate &= 0xffff;
11631 }
11632
11633 if (immediate == (immediate & 0x000000ff))
11634 {
11635 *immbits = immediate;
11636 return 0x9;
11637 }
11638 else if (immediate == (immediate & 0x0000ff00))
11639 {
11640 *immbits = immediate >> 8;
11641 return 0xb;
11642 }
11643
11644 bad_immediate:
11645 first_error (_("immediate value out of range"));
11646 return FAIL;
11647 }
11648
11649 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11650 A, B, C, D. */
11651
11652 static int
11653 neon_bits_same_in_bytes (unsigned imm)
11654 {
11655 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11656 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11657 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11658 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11659 }
11660
11661 /* For immediate of above form, return 0bABCD. */
11662
11663 static unsigned
11664 neon_squash_bits (unsigned imm)
11665 {
11666 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11667 | ((imm & 0x01000000) >> 21);
11668 }
11669
11670 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11671
11672 static unsigned
11673 neon_qfloat_bits (unsigned imm)
11674 {
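  /* abcdefgh: a is the sign (bit 31), bcd the low three exponent bits
     (bits 25-23) and efgh the top four mantissa bits (bits 22-19).  */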
11675 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11676 }
11677
11678 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11679 the instruction. *OP is passed as the initial value of the op field, and
11680    may be set to a different value depending on the constant (e.g.
11681 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11682    MVN). If the immediate looks like a repeated pattern then also
11683 try smaller element sizes. */
11684
11685 static int
11686 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
11687 unsigned *immbits, int *op, int size,
11688 enum neon_el_type type)
11689 {
11690 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11691 float. */
11692 if (type == NT_float && !float_p)
11693 return FAIL;
11694
11695 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11696 {
11697 if (size != 32 || *op == 1)
11698 return FAIL;
11699 *immbits = neon_qfloat_bits (immlo);
11700 return 0xf;
11701 }
11702
11703 if (size == 64)
11704 {
11705 if (neon_bits_same_in_bytes (immhi)
11706 && neon_bits_same_in_bytes (immlo))
11707 {
11708 if (*op == 1)
11709 return FAIL;
11710 *immbits = (neon_squash_bits (immhi) << 4)
11711 | neon_squash_bits (immlo);
11712 *op = 1;
11713 return 0xe;
11714 }
11715
11716 if (immhi != immlo)
11717 return FAIL;
11718 }
11719
11720 if (size >= 32)
11721 {
11722 if (immlo == (immlo & 0x000000ff))
11723 {
11724 *immbits = immlo;
11725 return 0x0;
11726 }
11727 else if (immlo == (immlo & 0x0000ff00))
11728 {
11729 *immbits = immlo >> 8;
11730 return 0x2;
11731 }
11732 else if (immlo == (immlo & 0x00ff0000))
11733 {
11734 *immbits = immlo >> 16;
11735 return 0x4;
11736 }
11737 else if (immlo == (immlo & 0xff000000))
11738 {
11739 *immbits = immlo >> 24;
11740 return 0x6;
11741 }
11742 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11743 {
11744 *immbits = (immlo >> 8) & 0xff;
11745 return 0xc;
11746 }
11747 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11748 {
11749 *immbits = (immlo >> 16) & 0xff;
11750 return 0xd;
11751 }
11752
11753 if ((immlo & 0xffff) != (immlo >> 16))
11754 return FAIL;
11755 immlo &= 0xffff;
11756 }
11757
11758 if (size >= 16)
11759 {
11760 if (immlo == (immlo & 0x000000ff))
11761 {
11762 *immbits = immlo;
11763 return 0x8;
11764 }
11765 else if (immlo == (immlo & 0x0000ff00))
11766 {
11767 *immbits = immlo >> 8;
11768 return 0xa;
11769 }
11770
11771 if ((immlo & 0xff) != (immlo >> 8))
11772 return FAIL;
11773 immlo &= 0xff;
11774 }
11775
11776 if (immlo == (immlo & 0x000000ff))
11777 {
11778 /* Don't allow MVN with 8-bit immediate. */
11779 if (*op == 1)
11780 return FAIL;
11781 *immbits = immlo;
11782 return 0xe;
11783 }
11784
11785 return FAIL;
11786 }
11787
11788 /* Write immediate bits [7:0] to the following locations:
11789
11790 |28/24|23 19|18 16|15 4|3 0|
11791 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11792
11793 This function is used by VMOV/VMVN/VORR/VBIC. */
11794
11795 static void
11796 neon_write_immbits (unsigned immbits)
11797 {
11798 inst.instruction |= immbits & 0xf;
11799 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11800 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11801 }
11802
11803 /* Invert low-order SIZE bits of XHI:XLO. */
11804
11805 static void
11806 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11807 {
11808 unsigned immlo = xlo ? *xlo : 0;
11809 unsigned immhi = xhi ? *xhi : 0;
11810
11811 switch (size)
11812 {
11813 case 8:
11814 immlo = (~immlo) & 0xff;
11815 break;
11816
11817 case 16:
11818 immlo = (~immlo) & 0xffff;
11819 break;
11820
11821 case 64:
11822 immhi = (~immhi) & 0xffffffff;
11823 /* fall through. */
11824
11825 case 32:
11826 immlo = (~immlo) & 0xffffffff;
11827 break;
11828
11829 default:
11830 abort ();
11831 }
11832
11833 if (xlo)
11834 *xlo = immlo;
11835
11836 if (xhi)
11837 *xhi = immhi;
11838 }
11839
11840 static void
11841 do_neon_logic (void)
11842 {
11843 if (inst.operands[2].present && inst.operands[2].isreg)
11844 {
11845 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11846 neon_check_type (3, rs, N_IGNORE_TYPE);
11847 /* U bit and size field were set as part of the bitmask. */
11848 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11849 neon_three_same (neon_quad (rs), 0, -1);
11850 }
11851 else
11852 {
11853 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11854 struct neon_type_el et = neon_check_type (2, rs,
11855 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11856 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11857 unsigned immbits;
11858 int cmode;
11859
11860 if (et.type == NT_invtype)
11861 return;
11862
11863 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11864
11865 immbits = inst.operands[1].imm;
11866 if (et.size == 64)
11867 {
11868 /* .i64 is a pseudo-op, so the immediate must be a repeating
11869 pattern. */
11870 if (immbits != (inst.operands[1].regisimm ?
11871 inst.operands[1].reg : 0))
11872 {
11873 /* Set immbits to an invalid constant. */
11874 immbits = 0xdeadbeef;
11875 }
11876 }
11877
11878 switch (opcode)
11879 {
11880 case N_MNEM_vbic:
11881 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11882 break;
11883
11884 case N_MNEM_vorr:
11885 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11886 break;
11887
11888 case N_MNEM_vand:
11889 /* Pseudo-instruction for VBIC. */
11890 neon_invert_size (&immbits, 0, et.size);
11891 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11892 break;
11893
11894 case N_MNEM_vorn:
11895 /* Pseudo-instruction for VORR. */
11896 neon_invert_size (&immbits, 0, et.size);
11897 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11898 break;
11899
11900 default:
11901 abort ();
11902 }
11903
11904 if (cmode == FAIL)
11905 return;
11906
11907 inst.instruction |= neon_quad (rs) << 6;
11908 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11909 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11910 inst.instruction |= cmode << 8;
11911 neon_write_immbits (immbits);
11912
11913 inst.instruction = neon_dp_fixup (inst.instruction);
11914 }
11915 }
11916
11917 static void
11918 do_neon_bitfield (void)
11919 {
11920 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11921 neon_check_type (3, rs, N_IGNORE_TYPE);
11922 neon_three_same (neon_quad (rs), 0, -1);
11923 }
11924
11925 static void
11926 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11927 unsigned destbits)
11928 {
11929 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11930 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11931 types | N_KEY);
11932 if (et.type == NT_float)
11933 {
11934 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11935 neon_three_same (neon_quad (rs), 0, -1);
11936 }
11937 else
11938 {
11939 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11940 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11941 }
11942 }
11943
11944 static void
11945 do_neon_dyadic_if_su (void)
11946 {
11947 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11948 }
11949
11950 static void
11951 do_neon_dyadic_if_su_d (void)
11952 {
11953   /* This version only allows D registers, but that constraint is enforced during
11954 operand parsing so we don't need to do anything extra here. */
11955 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11956 }
11957
11958 static void
11959 do_neon_dyadic_if_i_d (void)
11960 {
11961 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11962 affected if we specify unsigned args. */
11963 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11964 }
11965
11966 enum vfp_or_neon_is_neon_bits
11967 {
11968 NEON_CHECK_CC = 1,
11969 NEON_CHECK_ARCH = 2
11970 };
11971
11972 /* Call this function for an instruction which may have belonged to either the
11973    VFP or Neon instruction sets, but turned out to be a Neon instruction (due to
11974    the operand types involved, etc.). We have to check and/or fix up a couple of
11975 things:
11976
11977 - Make sure the user hasn't attempted to make a Neon instruction
11978 conditional.
11979 - Alter the value in the condition code field if necessary.
11980 - Make sure that the arch supports Neon instructions.
11981
11982 Which of these operations take place depends on bits from enum
11983 vfp_or_neon_is_neon_bits.
11984
11985 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11986 current instruction's condition is COND_ALWAYS, the condition field is
11987 changed to inst.uncond_value. This is necessary because instructions shared
11988 between VFP and Neon may be conditional for the VFP variants only, and the
11989 unconditional Neon version must have, e.g., 0xF in the condition field. */
11990
11991 static int
11992 vfp_or_neon_is_neon (unsigned check)
11993 {
11994 /* Conditions are always legal in Thumb mode (IT blocks). */
11995 if (!thumb_mode && (check & NEON_CHECK_CC))
11996 {
11997 if (inst.cond != COND_ALWAYS)
11998 {
11999 first_error (_(BAD_COND));
12000 return FAIL;
12001 }
12002 if (inst.uncond_value != -1)
12003 inst.instruction |= inst.uncond_value << 28;
12004 }
12005
12006 if ((check & NEON_CHECK_ARCH)
12007 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
12008 {
12009 first_error (_(BAD_FPU));
12010 return FAIL;
12011 }
12012
12013 return SUCCESS;
12014 }
12015
12016 static void
12017 do_neon_addsub_if_i (void)
12018 {
12019 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
12020 return;
12021
12022 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12023 return;
12024
12025 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12026 affected if we specify unsigned args. */
12027 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12028 }
12029
12030 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12031 result to be:
12032 V<op> A,B (A is operand 0, B is operand 2)
12033 to mean:
12034 V<op> A,B,A
12035 not:
12036 V<op> A,B,B
12037 so handle that case specially. */
12038
12039 static void
12040 neon_exchange_operands (void)
12041 {
12042 void *scratch = alloca (sizeof (inst.operands[0]));
12043 if (inst.operands[1].present)
12044 {
12045 /* Swap operands[1] and operands[2]. */
12046 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12047 inst.operands[1] = inst.operands[2];
12048 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12049 }
12050 else
12051 {
12052 inst.operands[1] = inst.operands[2];
12053 inst.operands[2] = inst.operands[0];
12054 }
12055 }
12056
12057 static void
12058 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12059 {
12060 if (inst.operands[2].isreg)
12061 {
12062 if (invert)
12063 neon_exchange_operands ();
12064 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12065 }
12066 else
12067 {
12068 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12069 struct neon_type_el et = neon_check_type (2, rs,
12070 N_EQK | N_SIZ, immtypes | N_KEY);
12071
12072 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12073 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12074 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12075 inst.instruction |= LOW4 (inst.operands[1].reg);
12076 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12077 inst.instruction |= neon_quad (rs) << 6;
12078 inst.instruction |= (et.type == NT_float) << 10;
12079 inst.instruction |= neon_logbits (et.size) << 18;
12080
12081 inst.instruction = neon_dp_fixup (inst.instruction);
12082 }
12083 }
12084
12085 static void
12086 do_neon_cmp (void)
12087 {
12088 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12089 }
12090
12091 static void
12092 do_neon_cmp_inv (void)
12093 {
12094 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12095 }
12096
12097 static void
12098 do_neon_ceq (void)
12099 {
12100 neon_compare (N_IF_32, N_IF_32, FALSE);
12101 }
12102
12103 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12104 scalars, which are encoded in 5 bits, M : Rm.
12105 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12106 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12107 index in M. */
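/* For example, a 16-bit scalar referring to register 5, element 2 is encoded
   as 5 | (2 << 3) = 0x15, i.e. M:Rm = 1:0101. */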
12108
12109 static unsigned
12110 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12111 {
12112 unsigned regno = NEON_SCALAR_REG (scalar);
12113 unsigned elno = NEON_SCALAR_INDEX (scalar);
12114
12115 switch (elsize)
12116 {
12117 case 16:
12118 if (regno > 7 || elno > 3)
12119 goto bad_scalar;
12120 return regno | (elno << 3);
12121
12122 case 32:
12123 if (regno > 15 || elno > 1)
12124 goto bad_scalar;
12125 return regno | (elno << 4);
12126
12127 default:
12128 bad_scalar:
12129 first_error (_("scalar out of range for multiply instruction"));
12130 }
12131
12132 return 0;
12133 }
12134
12135 /* Encode multiply / multiply-accumulate scalar instructions. */
12136
12137 static void
12138 neon_mul_mac (struct neon_type_el et, int ubit)
12139 {
12140 unsigned scalar;
12141
12142 /* Give a more helpful error message if we have an invalid type. */
12143 if (et.type == NT_invtype)
12144 return;
12145
12146 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12147 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12148 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12149 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12150 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12151 inst.instruction |= LOW4 (scalar);
12152 inst.instruction |= HI1 (scalar) << 5;
12153 inst.instruction |= (et.type == NT_float) << 8;
12154 inst.instruction |= neon_logbits (et.size) << 20;
12155 inst.instruction |= (ubit != 0) << 24;
12156
12157 inst.instruction = neon_dp_fixup (inst.instruction);
12158 }
12159
12160 static void
12161 do_neon_mac_maybe_scalar (void)
12162 {
12163 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12164 return;
12165
12166 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12167 return;
12168
12169 if (inst.operands[2].isscalar)
12170 {
12171 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12172 struct neon_type_el et = neon_check_type (3, rs,
12173 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12174 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12175 neon_mul_mac (et, neon_quad (rs));
12176 }
12177 else
12178 {
12179 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12180 affected if we specify unsigned args. */
12181 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12182 }
12183 }
12184
12185 static void
12186 do_neon_tst (void)
12187 {
12188 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12189 struct neon_type_el et = neon_check_type (3, rs,
12190 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12191 neon_three_same (neon_quad (rs), 0, et.size);
12192 }
12193
12194 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12195 same types as the MAC equivalents. The polynomial type for this instruction
12196 is encoded the same as the integer type. */
12197
12198 static void
12199 do_neon_mul (void)
12200 {
12201 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12202 return;
12203
12204 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12205 return;
12206
12207 if (inst.operands[2].isscalar)
12208 do_neon_mac_maybe_scalar ();
12209 else
12210 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12211 }
12212
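/* Encode VQDMULH / VQRDMULH, with either a scalar or a vector second source
   operand; the rounding variant is selected by the opcode bit mask. */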
12213 static void
12214 do_neon_qdmulh (void)
12215 {
12216 if (inst.operands[2].isscalar)
12217 {
12218 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12219 struct neon_type_el et = neon_check_type (3, rs,
12220 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12221 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12222 neon_mul_mac (et, neon_quad (rs));
12223 }
12224 else
12225 {
12226 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12227 struct neon_type_el et = neon_check_type (3, rs,
12228 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12229 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12230 /* The U bit (rounding) comes from bit mask. */
12231 neon_three_same (neon_quad (rs), 0, et.size);
12232 }
12233 }
12234
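/* Encode the absolute floating-point comparisons VACGE / VACGT. The inverted
   forms below (VACLE / VACLT) are the same operations with the source
   operands exchanged. */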
12235 static void
12236 do_neon_fcmp_absolute (void)
12237 {
12238 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12239 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12240 /* Size field comes from bit mask. */
12241 neon_three_same (neon_quad (rs), 1, -1);
12242 }
12243
12244 static void
12245 do_neon_fcmp_absolute_inv (void)
12246 {
12247 neon_exchange_operands ();
12248 do_neon_fcmp_absolute ();
12249 }
12250
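/* Encode the Newton-Raphson step instructions VRECPS / VRSQRTS; which of the
   two is meant comes from the opcode bit mask. */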
12251 static void
12252 do_neon_step (void)
12253 {
12254 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12255 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12256 neon_three_same (neon_quad (rs), 0, -1);
12257 }
12258
12259 static void
12260 do_neon_abs_neg (void)
12261 {
12262 enum neon_shape rs;
12263 struct neon_type_el et;
12264
12265 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12266 return;
12267
12268 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12269 return;
12270
12271 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12272 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12273
12274 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12275 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12276 inst.instruction |= LOW4 (inst.operands[1].reg);
12277 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12278 inst.instruction |= neon_quad (rs) << 6;
12279 inst.instruction |= (et.type == NT_float) << 10;
12280 inst.instruction |= neon_logbits (et.size) << 18;
12281
12282 inst.instruction = neon_dp_fixup (inst.instruction);
12283 }
12284
12285 static void
12286 do_neon_sli (void)
12287 {
12288 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12289 struct neon_type_el et = neon_check_type (2, rs,
12290 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12291 int imm = inst.operands[2].imm;
12292 constraint (imm < 0 || (unsigned)imm >= et.size,
12293 _("immediate out of range for insert"));
12294 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12295 }
12296
12297 static void
12298 do_neon_sri (void)
12299 {
12300 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12301 struct neon_type_el et = neon_check_type (2, rs,
12302 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12303 int imm = inst.operands[2].imm;
12304 constraint (imm < 1 || (unsigned)imm > et.size,
12305 _("immediate out of range for insert"));
12306 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12307 }
12308
12309 static void
12310 do_neon_qshlu_imm (void)
12311 {
12312 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12313 struct neon_type_el et = neon_check_type (2, rs,
12314 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12315 int imm = inst.operands[2].imm;
12316 constraint (imm < 0 || (unsigned)imm >= et.size,
12317 _("immediate out of range for shift"));
12318 /* Only encodes the 'U present' variant of the instruction.
12319 In this case, signed types have OP (bit 8) set to 0.
12320 Unsigned types have OP set to 1. */
12321 inst.instruction |= (et.type == NT_unsigned) << 8;
12322 /* The rest of the bits are the same as other immediate shifts. */
12323 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12324 }
12325
12326 static void
12327 do_neon_qmovn (void)
12328 {
12329 struct neon_type_el et = neon_check_type (2, NS_DQ,
12330 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12331 /* Saturating move where operands can be signed or unsigned, and the
12332 destination has the same signedness. */
12333 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12334 if (et.type == NT_unsigned)
12335 inst.instruction |= 0xc0;
12336 else
12337 inst.instruction |= 0x80;
12338 neon_two_same (0, 1, et.size / 2);
12339 }
12340
12341 static void
12342 do_neon_qmovun (void)
12343 {
12344 struct neon_type_el et = neon_check_type (2, NS_DQ,
12345 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12346 /* Saturating move with unsigned results. Operands must be signed. */
12347 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12348 neon_two_same (0, 1, et.size / 2);
12349 }
12350
12351 static void
12352 do_neon_rshift_sat_narrow (void)
12353 {
12354 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12355 or unsigned. If operands are unsigned, results must also be unsigned. */
12356 struct neon_type_el et = neon_check_type (2, NS_DQI,
12357 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12358 int imm = inst.operands[2].imm;
12359 /* This gets the bounds check, size encoding and immediate bits calculation
12360 right. */
12361 et.size /= 2;
12362
12363 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12364 VQMOVN.I<size> <Dd>, <Qm>. */
12365 if (imm == 0)
12366 {
12367 inst.operands[2].present = 0;
12368 inst.instruction = N_MNEM_vqmovn;
12369 do_neon_qmovn ();
12370 return;
12371 }
12372
12373 constraint (imm < 1 || (unsigned)imm > et.size,
12374 _("immediate out of range"));
12375 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12376 }
12377
12378 static void
12379 do_neon_rshift_sat_narrow_u (void)
12380 {
12381 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12382 or unsigned. If operands are unsigned, results must also be unsigned. */
12383 struct neon_type_el et = neon_check_type (2, NS_DQI,
12384 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12385 int imm = inst.operands[2].imm;
12386 /* This gets the bounds check, size encoding and immediate bits calculation
12387 right. */
12388 et.size /= 2;
12389
12390 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12391 VQMOVUN.I<size> <Dd>, <Qm>. */
12392 if (imm == 0)
12393 {
12394 inst.operands[2].present = 0;
12395 inst.instruction = N_MNEM_vqmovun;
12396 do_neon_qmovun ();
12397 return;
12398 }
12399
12400 constraint (imm < 1 || (unsigned)imm > et.size,
12401 _("immediate out of range"));
12402 /* FIXME: The manual is kind of unclear about what value U should have in
12403 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12404 must be 1. */
12405 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12406 }
12407
12408 static void
12409 do_neon_movn (void)
12410 {
12411 struct neon_type_el et = neon_check_type (2, NS_DQ,
12412 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12413 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12414 neon_two_same (0, 1, et.size / 2);
12415 }
12416
12417 static void
12418 do_neon_rshift_narrow (void)
12419 {
12420 struct neon_type_el et = neon_check_type (2, NS_DQI,
12421 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12422 int imm = inst.operands[2].imm;
12423 /* This gets the bounds check, size encoding and immediate bits calculation
12424 right. */
12425 et.size /= 2;
12426
12427   /* If the immediate is zero then this is a pseudo-instruction for
12428      VMOVN.I<size> <Dd>, <Qm>. */
12429 if (imm == 0)
12430 {
12431 inst.operands[2].present = 0;
12432 inst.instruction = N_MNEM_vmovn;
12433 do_neon_movn ();
12434 return;
12435 }
12436
12437 constraint (imm < 1 || (unsigned)imm > et.size,
12438 _("immediate out of range for narrowing operation"));
12439 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12440 }
12441
12442 static void
12443 do_neon_shll (void)
12444 {
12445 /* FIXME: Type checking when lengthening. */
12446 struct neon_type_el et = neon_check_type (2, NS_QDI,
12447 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12448 unsigned imm = inst.operands[2].imm;
12449
12450 if (imm == et.size)
12451 {
12452 /* Maximum shift variant. */
12453 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12454 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12455 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12456 inst.instruction |= LOW4 (inst.operands[1].reg);
12457 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12458 inst.instruction |= neon_logbits (et.size) << 18;
12459
12460 inst.instruction = neon_dp_fixup (inst.instruction);
12461 }
12462 else
12463 {
12464 /* A more-specific type check for non-max versions. */
12465 et = neon_check_type (2, NS_QDI,
12466 N_EQK | N_DBL, N_SU_32 | N_KEY);
12467 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12468 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12469 }
12470 }
12471
12472 /* Check the various types for the VCVT instruction, and return which version
12473 the current instruction is. */
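/* The flavour index returned here is shared with do_vfp_nsyn_cvt: 0-3 are the
   32-bit integer <-> F32 conversions, 4-5 are F32 <-> F64, 6-9 are 32-bit
   integer <-> F64, 10-17 are the 16-bit fixed-point conversions, and -1 means
   no valid combination was found. */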
12474
12475 static int
12476 neon_cvt_flavour (enum neon_shape rs)
12477 {
12478 #define CVT_VAR(C,X,Y) \
12479 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12480 if (et.type != NT_invtype) \
12481 { \
12482 inst.error = NULL; \
12483 return (C); \
12484 }
12485 struct neon_type_el et;
12486 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12487 || rs == NS_FF) ? N_VFP : 0;
12488 /* The instruction versions which take an immediate take one register
12489 argument, which is extended to the width of the full register. Thus the
12490 "source" and "destination" registers must have the same width. Hack that
12491 here by making the size equal to the key (wider, in this case) operand. */
12492 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12493
12494 CVT_VAR (0, N_S32, N_F32);
12495 CVT_VAR (1, N_U32, N_F32);
12496 CVT_VAR (2, N_F32, N_S32);
12497 CVT_VAR (3, N_F32, N_U32);
12498
12499 whole_reg = N_VFP;
12500
12501 /* VFP instructions. */
12502 CVT_VAR (4, N_F32, N_F64);
12503 CVT_VAR (5, N_F64, N_F32);
12504 CVT_VAR (6, N_S32, N_F64 | key);
12505 CVT_VAR (7, N_U32, N_F64 | key);
12506 CVT_VAR (8, N_F64 | key, N_S32);
12507 CVT_VAR (9, N_F64 | key, N_U32);
12508 /* VFP instructions with bitshift. */
12509 CVT_VAR (10, N_F32 | key, N_S16);
12510 CVT_VAR (11, N_F32 | key, N_U16);
12511 CVT_VAR (12, N_F64 | key, N_S16);
12512 CVT_VAR (13, N_F64 | key, N_U16);
12513 CVT_VAR (14, N_S16, N_F32 | key);
12514 CVT_VAR (15, N_U16, N_F32 | key);
12515 CVT_VAR (16, N_S16, N_F64 | key);
12516 CVT_VAR (17, N_U16, N_F64 | key);
12517
12518 return -1;
12519 #undef CVT_VAR
12520 }
12521
12522 /* Neon-syntax VFP conversions. */
12523
12524 static void
12525 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12526 {
12527 const char *opname = 0;
12528
12529 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12530 {
12531 /* Conversions with immediate bitshift. */
12532 const char *enc[] =
12533 {
12534 "ftosls",
12535 "ftouls",
12536 "fsltos",
12537 "fultos",
12538 NULL,
12539 NULL,
12540 "ftosld",
12541 "ftould",
12542 "fsltod",
12543 "fultod",
12544 "fshtos",
12545 "fuhtos",
12546 "fshtod",
12547 "fuhtod",
12548 "ftoshs",
12549 "ftouhs",
12550 "ftoshd",
12551 "ftouhd"
12552 };
12553
12554 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12555 {
12556 opname = enc[flavour];
12557 constraint (inst.operands[0].reg != inst.operands[1].reg,
12558 _("operands 0 and 1 must be the same register"));
12559 inst.operands[1] = inst.operands[2];
12560 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12561 }
12562 }
12563 else
12564 {
12565 /* Conversions without bitshift. */
12566 const char *enc[] =
12567 {
12568 "ftosis",
12569 "ftouis",
12570 "fsitos",
12571 "fuitos",
12572 "fcvtsd",
12573 "fcvtds",
12574 "ftosid",
12575 "ftouid",
12576 "fsitod",
12577 "fuitod"
12578 };
12579
12580 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12581 opname = enc[flavour];
12582 }
12583
12584 if (opname)
12585 do_vfp_nsyn_opcode (opname);
12586 }
12587
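/* Encode the Neon-syntax VFP conversions which round towards zero, using the
   FTOSIZ / FTOUIZ opcodes. */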
12588 static void
12589 do_vfp_nsyn_cvtz (void)
12590 {
12591 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12592 int flavour = neon_cvt_flavour (rs);
12593 const char *enc[] =
12594 {
12595 "ftosizs",
12596 "ftouizs",
12597 NULL,
12598 NULL,
12599 NULL,
12600 NULL,
12601 "ftosizd",
12602 "ftouizd"
12603 };
12604
12605 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12606 do_vfp_nsyn_opcode (enc[flavour]);
12607 }
12608
12609 static void
12610 do_neon_cvt (void)
12611 {
12612 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12613 NS_FD, NS_DF, NS_FF, NS_NULL);
12614 int flavour = neon_cvt_flavour (rs);
12615
12616 /* VFP rather than Neon conversions. */
12617 if (flavour >= 4)
12618 {
12619 do_vfp_nsyn_cvt (rs, flavour);
12620 return;
12621 }
12622
12623 switch (rs)
12624 {
12625 case NS_DDI:
12626 case NS_QQI:
12627 {
12628         unsigned immbits = 32 - inst.operands[2].imm;
12629         unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12630
12631         if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12632           return;
12633         /* Fixed-point conversion with #0 immediate is encoded as an
12634            integer conversion. */
12635         if (inst.operands[2].present && inst.operands[2].imm == 0)
12636           goto int_encode;
12637 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12638 if (flavour != -1)
12639 inst.instruction |= enctab[flavour];
12640 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12641 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12642 inst.instruction |= LOW4 (inst.operands[1].reg);
12643 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12644 inst.instruction |= neon_quad (rs) << 6;
12645 inst.instruction |= 1 << 21;
12646 inst.instruction |= immbits << 16;
12647
12648 inst.instruction = neon_dp_fixup (inst.instruction);
12649 }
12650 break;
12651
12652 case NS_DD:
12653 case NS_QQ:
12654 int_encode:
12655 {
12656 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12657
12658 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12659
12660 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12661 return;
12662
12663 if (flavour != -1)
12664 inst.instruction |= enctab[flavour];
12665
12666 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12667 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12668 inst.instruction |= LOW4 (inst.operands[1].reg);
12669 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12670 inst.instruction |= neon_quad (rs) << 6;
12671 inst.instruction |= 2 << 18;
12672
12673 inst.instruction = neon_dp_fixup (inst.instruction);
12674 }
12675 break;
12676
12677 default:
12678 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12679 do_vfp_nsyn_cvt (rs, flavour);
12680 }
12681 }
12682
12683 static void
12684 neon_move_immediate (void)
12685 {
12686 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12687 struct neon_type_el et = neon_check_type (2, rs,
12688 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12689 unsigned immlo, immhi = 0, immbits;
12690 int op, cmode, float_p;
12691
12692 constraint (et.type == NT_invtype,
12693 _("operand size must be specified for immediate VMOV"));
12694
12695 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12696 op = (inst.instruction & (1 << 5)) != 0;
12697
12698 immlo = inst.operands[1].imm;
12699 if (inst.operands[1].regisimm)
12700 immhi = inst.operands[1].reg;
12701
12702 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12703 _("immediate has bits set outside the operand size"));
12704
12705 float_p = inst.operands[1].immisfloat;
12706
12707 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
12708 et.size, et.type)) == FAIL)
12709 {
12710 /* Invert relevant bits only. */
12711 neon_invert_size (&immlo, &immhi, et.size);
12712 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12713 with one or the other; those cases are caught by
12714 neon_cmode_for_move_imm. */
12715 op = !op;
12716 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
12717 &op, et.size, et.type)) == FAIL)
12718 {
12719 first_error (_("immediate out of range"));
12720 return;
12721 }
12722 }
12723
12724 inst.instruction &= ~(1 << 5);
12725 inst.instruction |= op << 5;
12726
12727 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12728 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12729 inst.instruction |= neon_quad (rs) << 6;
12730 inst.instruction |= cmode << 8;
12731
12732 neon_write_immbits (immbits);
12733 }
12734
12735 static void
12736 do_neon_mvn (void)
12737 {
12738 if (inst.operands[1].isreg)
12739 {
12740 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12741
12742 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12743 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12744 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12745 inst.instruction |= LOW4 (inst.operands[1].reg);
12746 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12747 inst.instruction |= neon_quad (rs) << 6;
12748 }
12749 else
12750 {
12751 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12752 neon_move_immediate ();
12753 }
12754
12755 inst.instruction = neon_dp_fixup (inst.instruction);
12756 }
12757
12758 /* Encode instructions of the form:
12759
12760 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12761 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12762
12763 */
12764
12765 static void
12766 neon_mixed_length (struct neon_type_el et, unsigned size)
12767 {
12768 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12769 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12770 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12771 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12772 inst.instruction |= LOW4 (inst.operands[2].reg);
12773 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12774 inst.instruction |= (et.type == NT_unsigned) << 24;
12775 inst.instruction |= neon_logbits (size) << 20;
12776
12777 inst.instruction = neon_dp_fixup (inst.instruction);
12778 }
12779
12780 static void
12781 do_neon_dyadic_long (void)
12782 {
12783 /* FIXME: Type checking for lengthening op. */
12784 struct neon_type_el et = neon_check_type (3, NS_QDD,
12785 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12786 neon_mixed_length (et, et.size);
12787 }
12788
12789 static void
12790 do_neon_abal (void)
12791 {
12792 struct neon_type_el et = neon_check_type (3, NS_QDD,
12793 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12794 neon_mixed_length (et, et.size);
12795 }
12796
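/* Encode a lengthening multiply or multiply-accumulate, where the second
   source operand may be either a scalar or a vector. */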
12797 static void
12798 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12799 {
12800 if (inst.operands[2].isscalar)
12801 {
12802 struct neon_type_el et = neon_check_type (3, NS_QDS,
12803 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12804 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12805 neon_mul_mac (et, et.type == NT_unsigned);
12806 }
12807 else
12808 {
12809 struct neon_type_el et = neon_check_type (3, NS_QDD,
12810 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12811 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12812 neon_mixed_length (et, et.size);
12813 }
12814 }
12815
12816 static void
12817 do_neon_mac_maybe_scalar_long (void)
12818 {
12819 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12820 }
12821
12822 static void
12823 do_neon_dyadic_wide (void)
12824 {
12825 struct neon_type_el et = neon_check_type (3, NS_QQD,
12826 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12827 neon_mixed_length (et, et.size);
12828 }
12829
12830 static void
12831 do_neon_dyadic_narrow (void)
12832 {
12833 struct neon_type_el et = neon_check_type (3, NS_QDD,
12834 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12835 /* Operand sign is unimportant, and the U bit is part of the opcode,
12836 so force the operand type to integer. */
12837 et.type = NT_integer;
12838 neon_mixed_length (et, et.size / 2);
12839 }
12840
12841 static void
12842 do_neon_mul_sat_scalar_long (void)
12843 {
12844 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12845 }
12846
12847 static void
12848 do_neon_vmull (void)
12849 {
12850 if (inst.operands[2].isscalar)
12851 do_neon_mac_maybe_scalar_long ();
12852 else
12853 {
12854 struct neon_type_el et = neon_check_type (3, NS_QDD,
12855 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12856 if (et.type == NT_poly)
12857 inst.instruction = NEON_ENC_POLY (inst.instruction);
12858 else
12859 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12860 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12861 zero. Should be OK as-is. */
12862 neon_mixed_length (et, et.size);
12863 }
12864 }
12865
12866 static void
12867 do_neon_ext (void)
12868 {
12869 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12870 struct neon_type_el et = neon_check_type (3, rs,
12871 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12872 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12873 constraint (imm >= (neon_quad (rs) ? 16 : 8), _("shift out of range"));
12874 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12875 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12876 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12877 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12878 inst.instruction |= LOW4 (inst.operands[2].reg);
12879 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12880 inst.instruction |= neon_quad (rs) << 6;
12881 inst.instruction |= imm << 8;
12882
12883 inst.instruction = neon_dp_fixup (inst.instruction);
12884 }
12885
12886 static void
12887 do_neon_rev (void)
12888 {
12889 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12890 struct neon_type_el et = neon_check_type (2, rs,
12891 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12892 unsigned op = (inst.instruction >> 7) & 3;
12893 /* N (width of reversed regions) is encoded as part of the bitmask. We
12894 extract it here to check the elements to be reversed are smaller.
12895 Otherwise we'd get a reserved instruction. */
12896 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12897 assert (elsize != 0);
12898 constraint (et.size >= elsize,
12899 _("elements must be smaller than reversal region"));
12900 neon_two_same (neon_quad (rs), 1, et.size);
12901 }
12902
12903 static void
12904 do_neon_dup (void)
12905 {
12906 if (inst.operands[1].isscalar)
12907 {
12908 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12909 struct neon_type_el et = neon_check_type (2, rs,
12910 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12911 unsigned sizebits = et.size >> 3;
12912 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12913 int logsize = neon_logbits (et.size);
12914 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12915
12916 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12917 return;
12918
12919 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12920 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12921 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12922 inst.instruction |= LOW4 (dm);
12923 inst.instruction |= HI1 (dm) << 5;
12924 inst.instruction |= neon_quad (rs) << 6;
12925 inst.instruction |= x << 17;
12926 inst.instruction |= sizebits << 16;
12927
12928 inst.instruction = neon_dp_fixup (inst.instruction);
12929 }
12930 else
12931 {
12932 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12933 struct neon_type_el et = neon_check_type (2, rs,
12934 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12935 /* Duplicate ARM register to lanes of vector. */
12936 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12937 switch (et.size)
12938 {
12939 case 8: inst.instruction |= 0x400000; break;
12940 case 16: inst.instruction |= 0x000020; break;
12941 case 32: inst.instruction |= 0x000000; break;
12942 default: break;
12943 }
12944 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12945 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12946 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12947 inst.instruction |= neon_quad (rs) << 21;
12948 /* The encoding for this instruction is identical for the ARM and Thumb
12949 variants, except for the condition field. */
12950 do_vfp_cond_or_thumb ();
12951 }
12952 }
12953
12954 /* VMOV has particularly many variations. It can be one of:
12955 0. VMOV<c><q> <Qd>, <Qm>
12956 1. VMOV<c><q> <Dd>, <Dm>
12957 (Register operations, which are VORR with Rm = Rn.)
12958 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12959 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12960 (Immediate loads.)
12961 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12962 (ARM register to scalar.)
12963 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12964 (Two ARM registers to vector.)
12965 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12966 (Scalar to ARM register.)
12967 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12968 (Vector to two ARM registers.)
12969 8. VMOV.F32 <Sd>, <Sm>
12970 9. VMOV.F64 <Dd>, <Dm>
12971 (VFP register moves.)
12972 10. VMOV.F32 <Sd>, #imm
12973 11. VMOV.F64 <Dd>, #imm
12974 (VFP float immediate load.)
12975 12. VMOV <Rd>, <Sm>
12976 (VFP single to ARM reg.)
12977 13. VMOV <Sd>, <Rm>
12978 (ARM reg to VFP single.)
12979 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12980 (Two ARM regs to two VFP singles.)
12981 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12982 (Two VFP singles to two ARM regs.)
12983
12984 These cases can be disambiguated using neon_select_shape, except cases 1/9
12985 and 3/11 which depend on the operand type too.
12986
12987 All the encoded bits are hardcoded by this function.
12988
12989 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12990 Cases 5, 7 may be used with VFPv2 and above.
12991
12992    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12993    can specify a type where it doesn't make sense to, and it is ignored).
12994 */
12995
12996 static void
12997 do_neon_mov (void)
12998 {
12999 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
13000 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
13001 NS_NULL);
13002 struct neon_type_el et;
13003 const char *ldconst = 0;
13004
13005 switch (rs)
13006 {
13007 case NS_DD: /* case 1/9. */
13008 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13009 /* It is not an error here if no type is given. */
13010 inst.error = NULL;
13011 if (et.type == NT_float && et.size == 64)
13012 {
13013 do_vfp_nsyn_opcode ("fcpyd");
13014 break;
13015 }
13016 /* fall through. */
13017
13018 case NS_QQ: /* case 0/1. */
13019 {
13020 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13021 return;
13022 /* The architecture manual I have doesn't explicitly state which
13023 value the U bit should have for register->register moves, but
13024 the equivalent VORR instruction has U = 0, so do that. */
13025 inst.instruction = 0x0200110;
13026 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13027 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13028 inst.instruction |= LOW4 (inst.operands[1].reg);
13029 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13030 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13031 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13032 inst.instruction |= neon_quad (rs) << 6;
13033
13034 inst.instruction = neon_dp_fixup (inst.instruction);
13035 }
13036 break;
13037
13038 case NS_DI: /* case 3/11. */
13039 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13040 inst.error = NULL;
13041 if (et.type == NT_float && et.size == 64)
13042 {
13043 /* case 11 (fconstd). */
13044 ldconst = "fconstd";
13045 goto encode_fconstd;
13046 }
13047 /* fall through. */
13048
13049 case NS_QI: /* case 2/3. */
13050 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13051 return;
13052 inst.instruction = 0x0800010;
13053 neon_move_immediate ();
13054 inst.instruction = neon_dp_fixup (inst.instruction);
13055 break;
13056
13057 case NS_SR: /* case 4. */
13058 {
13059 unsigned bcdebits = 0;
13060 struct neon_type_el et = neon_check_type (2, NS_NULL,
13061 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13062 int logsize = neon_logbits (et.size);
13063 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13064 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13065
13066 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13067 _(BAD_FPU));
13068 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13069 && et.size != 32, _(BAD_FPU));
13070 constraint (et.type == NT_invtype, _("bad type for scalar"));
13071 constraint (x >= 64 / et.size, _("scalar index out of range"));
13072
13073 switch (et.size)
13074 {
13075 case 8: bcdebits = 0x8; break;
13076 case 16: bcdebits = 0x1; break;
13077 case 32: bcdebits = 0x0; break;
13078 default: ;
13079 }
13080
13081 bcdebits |= x << logsize;
13082
13083 inst.instruction = 0xe000b10;
13084 do_vfp_cond_or_thumb ();
13085 inst.instruction |= LOW4 (dn) << 16;
13086 inst.instruction |= HI1 (dn) << 7;
13087 inst.instruction |= inst.operands[1].reg << 12;
13088 inst.instruction |= (bcdebits & 3) << 5;
13089 inst.instruction |= (bcdebits >> 2) << 21;
13090 }
13091 break;
13092
13093 case NS_DRR: /* case 5 (fmdrr). */
13094 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13095 _(BAD_FPU));
13096
13097 inst.instruction = 0xc400b10;
13098 do_vfp_cond_or_thumb ();
13099 inst.instruction |= LOW4 (inst.operands[0].reg);
13100 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13101 inst.instruction |= inst.operands[1].reg << 12;
13102 inst.instruction |= inst.operands[2].reg << 16;
13103 break;
13104
13105 case NS_RS: /* case 6. */
13106 {
13107 struct neon_type_el et = neon_check_type (2, NS_NULL,
13108 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13109 unsigned logsize = neon_logbits (et.size);
13110 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13111 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13112 unsigned abcdebits = 0;
13113
13114 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13115 _(BAD_FPU));
13116 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13117 && et.size != 32, _(BAD_FPU));
13118 constraint (et.type == NT_invtype, _("bad type for scalar"));
13119 constraint (x >= 64 / et.size, _("scalar index out of range"));
13120
13121 switch (et.size)
13122 {
13123 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13124 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13125 case 32: abcdebits = 0x00; break;
13126 default: ;
13127 }
13128
13129 abcdebits |= x << logsize;
13130 inst.instruction = 0xe100b10;
13131 do_vfp_cond_or_thumb ();
13132 inst.instruction |= LOW4 (dn) << 16;
13133 inst.instruction |= HI1 (dn) << 7;
13134 inst.instruction |= inst.operands[0].reg << 12;
13135 inst.instruction |= (abcdebits & 3) << 5;
13136 inst.instruction |= (abcdebits >> 2) << 21;
13137 }
13138 break;
13139
13140 case NS_RRD: /* case 7 (fmrrd). */
13141 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13142 _(BAD_FPU));
13143
13144 inst.instruction = 0xc500b10;
13145 do_vfp_cond_or_thumb ();
13146 inst.instruction |= inst.operands[0].reg << 12;
13147 inst.instruction |= inst.operands[1].reg << 16;
13148 inst.instruction |= LOW4 (inst.operands[2].reg);
13149 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13150 break;
13151
13152 case NS_FF: /* case 8 (fcpys). */
13153 do_vfp_nsyn_opcode ("fcpys");
13154 break;
13155
13156 case NS_FI: /* case 10 (fconsts). */
13157 ldconst = "fconsts";
13158 encode_fconstd:
13159 if (is_quarter_float (inst.operands[1].imm))
13160 {
13161 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13162 do_vfp_nsyn_opcode (ldconst);
13163 }
13164 else
13165 first_error (_("immediate out of range"));
13166 break;
13167
13168 case NS_RF: /* case 12 (fmrs). */
13169 do_vfp_nsyn_opcode ("fmrs");
13170 break;
13171
13172 case NS_FR: /* case 13 (fmsr). */
13173 do_vfp_nsyn_opcode ("fmsr");
13174 break;
13175
13176 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13177 (one of which is a list), but we have parsed four. Do some fiddling to
13178 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13179 expect. */
13180 case NS_RRFF: /* case 14 (fmrrs). */
13181 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13182 _("VFP registers must be adjacent"));
13183 inst.operands[2].imm = 2;
13184 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13185 do_vfp_nsyn_opcode ("fmrrs");
13186 break;
13187
13188 case NS_FFRR: /* case 15 (fmsrr). */
13189 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13190 _("VFP registers must be adjacent"));
13191 inst.operands[1] = inst.operands[2];
13192 inst.operands[2] = inst.operands[3];
13193 inst.operands[0].imm = 2;
13194 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13195 do_vfp_nsyn_opcode ("fmsrr");
13196 break;
13197
13198 default:
13199 abort ();
13200 }
13201 }
13202
13203 static void
13204 do_neon_rshift_round_imm (void)
13205 {
13206 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13207 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13208 int imm = inst.operands[2].imm;
13209
13210 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13211 if (imm == 0)
13212 {
13213 inst.operands[2].present = 0;
13214 do_neon_mov ();
13215 return;
13216 }
13217
13218 constraint (imm < 1 || (unsigned)imm > et.size,
13219 _("immediate out of range for shift"));
13220 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13221 et.size - imm);
13222 }
13223
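/* Encode VMOVL (a lengthening move). Only the element size needs to be placed
   in the immediate field, which VMOVL shares with the VSHLL shift encoding. */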
13224 static void
13225 do_neon_movl (void)
13226 {
13227 struct neon_type_el et = neon_check_type (2, NS_QD,
13228 N_EQK | N_DBL, N_SU_32 | N_KEY);
13229 unsigned sizebits = et.size >> 3;
13230 inst.instruction |= sizebits << 19;
13231 neon_two_same (0, et.type == NT_unsigned, -1);
13232 }
13233
13234 static void
13235 do_neon_trn (void)
13236 {
13237 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13238 struct neon_type_el et = neon_check_type (2, rs,
13239 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13240 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13241 neon_two_same (neon_quad (rs), 1, et.size);
13242 }
13243
13244 static void
13245 do_neon_zip_uzp (void)
13246 {
13247 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13248 struct neon_type_el et = neon_check_type (2, rs,
13249 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13250 if (rs == NS_DD && et.size == 32)
13251 {
13252 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13253 inst.instruction = N_MNEM_vtrn;
13254 do_neon_trn ();
13255 return;
13256 }
13257 neon_two_same (neon_quad (rs), 1, et.size);
13258 }
13259
13260 static void
13261 do_neon_sat_abs_neg (void)
13262 {
13263 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13264 struct neon_type_el et = neon_check_type (2, rs,
13265 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13266 neon_two_same (neon_quad (rs), 1, et.size);
13267 }
13268
13269 static void
13270 do_neon_pair_long (void)
13271 {
13272 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13273 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13274   /* Unsigned is encoded in OP field (bit 7) for these instructions. */
13275 inst.instruction |= (et.type == NT_unsigned) << 7;
13276 neon_two_same (neon_quad (rs), 1, et.size);
13277 }
13278
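/* Encode the reciprocal and reciprocal-square-root estimate instructions
   VRECPE / VRSQRTE, which accept either F32 or U32 elements; bit 8 selects
   the floating-point form. */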
13279 static void
13280 do_neon_recip_est (void)
13281 {
13282 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13283 struct neon_type_el et = neon_check_type (2, rs,
13284 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13285 inst.instruction |= (et.type == NT_float) << 8;
13286 neon_two_same (neon_quad (rs), 1, et.size);
13287 }
13288
13289 static void
13290 do_neon_cls (void)
13291 {
13292 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13293 struct neon_type_el et = neon_check_type (2, rs,
13294 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13295 neon_two_same (neon_quad (rs), 1, et.size);
13296 }
13297
13298 static void
13299 do_neon_clz (void)
13300 {
13301 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13302 struct neon_type_el et = neon_check_type (2, rs,
13303 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13304 neon_two_same (neon_quad (rs), 1, et.size);
13305 }
13306
13307 static void
13308 do_neon_cnt (void)
13309 {
13310 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13311 struct neon_type_el et = neon_check_type (2, rs,
13312 N_EQK | N_INT, N_8 | N_KEY);
13313 neon_two_same (neon_quad (rs), 1, et.size);
13314 }
13315
13316 static void
13317 do_neon_swp (void)
13318 {
13319 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13320 neon_two_same (neon_quad (rs), 1, -1);
13321 }
13322
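/* Encode VTBL / VTBX. The length of the table register list (one to four D
   registers) is encoded, minus one, in bits 8-9. */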
13323 static void
13324 do_neon_tbl_tbx (void)
13325 {
13326 unsigned listlenbits;
13327 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13328
13329 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13330 {
13331 first_error (_("bad list length for table lookup"));
13332 return;
13333 }
13334
13335 listlenbits = inst.operands[1].imm - 1;
13336 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13337 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13338 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13339 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13340 inst.instruction |= LOW4 (inst.operands[2].reg);
13341 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13342 inst.instruction |= listlenbits << 8;
13343
13344 inst.instruction = neon_dp_fixup (inst.instruction);
13345 }
13346
13347 static void
13348 do_neon_ldm_stm (void)
13349 {
13350 /* P, U and L bits are part of bitmask. */
13351 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13352 unsigned offsetbits = inst.operands[1].imm * 2;
13353
13354 if (inst.operands[1].issingle)
13355 {
13356 do_vfp_nsyn_ldm_stm (is_dbmode);
13357 return;
13358 }
13359
13360 constraint (is_dbmode && !inst.operands[0].writeback,
13361 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13362
13363 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13364 _("register list must contain at least 1 and at most 16 "
13365 "registers"));
13366
13367 inst.instruction |= inst.operands[0].reg << 16;
13368 inst.instruction |= inst.operands[0].writeback << 21;
13369 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13370 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13371
13372 inst.instruction |= offsetbits;
13373
13374 do_vfp_cond_or_thumb ();
13375 }
13376
13377 static void
13378 do_neon_ldr_str (void)
13379 {
13380 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13381
13382 if (inst.operands[0].issingle)
13383 {
13384 if (is_ldr)
13385 do_vfp_nsyn_opcode ("flds");
13386 else
13387 do_vfp_nsyn_opcode ("fsts");
13388 }
13389 else
13390 {
13391 if (is_ldr)
13392 do_vfp_nsyn_opcode ("fldd");
13393 else
13394 do_vfp_nsyn_opcode ("fstd");
13395 }
13396 }
13397
13398 /* The "interleave" version also handles non-interleaving register VLD1/VST1
13399    instructions. */
13400
13401 static void
13402 do_neon_ld_st_interleave (void)
13403 {
13404 struct neon_type_el et = neon_check_type (1, NS_NULL,
13405 N_8 | N_16 | N_32 | N_64);
13406 unsigned alignbits = 0;
13407 unsigned idx;
13408 /* The bits in this table go:
13409 0: register stride of one (0) or two (1)
13410 1,2: register list length, minus one (1, 2, 3, 4).
13411 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13412 We use -1 for invalid entries. */
13413 const int typetable[] =
13414 {
13415 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13416 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13417 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13418 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13419 };
13420 int typebits;
13421
13422 if (et.type == NT_invtype)
13423 return;
13424
13425 if (inst.operands[1].immisalign)
13426 switch (inst.operands[1].imm >> 8)
13427 {
13428 case 64: alignbits = 1; break;
13429 case 128:
13430 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13431 goto bad_alignment;
13432 alignbits = 2;
13433 break;
13434 case 256:
13435 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13436 goto bad_alignment;
13437 alignbits = 3;
13438 break;
13439 default:
13440 bad_alignment:
13441 first_error (_("bad alignment"));
13442 return;
13443 }
13444
13445 inst.instruction |= alignbits << 4;
13446 inst.instruction |= neon_logbits (et.size) << 6;
13447
13448 /* Bits [4:6] of the immediate in a list specifier encode register stride
13449 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13450 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13451 up the right value for "type" in a table based on this value and the given
13452 list style, then stick it back. */
13453 idx = ((inst.operands[0].imm >> 4) & 7)
13454 | (((inst.instruction >> 8) & 3) << 3);
13455
13456 typebits = typetable[idx];
13457
13458 constraint (typebits == -1, _("bad list type for instruction"));
13459
13460 inst.instruction &= ~0xf00;
13461 inst.instruction |= typebits << 8;
13462 }
13463
13464 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13465 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13466 otherwise. The variable arguments are a list of pairs of legal (size, align)
13467 values, terminated with -1. */
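/* For example, neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32,
   -1) accepts 16-bit elements at 16-bit alignment and 32-bit elements at
   32-bit alignment, and rejects any other explicit alignment. */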
13468
13469 static int
13470 neon_alignment_bit (int size, int align, int *do_align, ...)
13471 {
13472 va_list ap;
13473 int result = FAIL, thissize, thisalign;
13474
13475 if (!inst.operands[1].immisalign)
13476 {
13477 *do_align = 0;
13478 return SUCCESS;
13479 }
13480
13481 va_start (ap, do_align);
13482
13483 do
13484 {
13485 thissize = va_arg (ap, int);
13486 if (thissize == -1)
13487 break;
13488 thisalign = va_arg (ap, int);
13489
13490 if (size == thissize && align == thisalign)
13491 result = SUCCESS;
13492 }
13493 while (result != SUCCESS);
13494
13495 va_end (ap);
13496
13497 if (result == SUCCESS)
13498 *do_align = 1;
13499 else
13500 first_error (_("unsupported alignment for instruction"));
13501
13502 return result;
13503 }
13504
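/* Encode single n-element structure to one lane VLD<n> / VST<n>
   instructions. */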
13505 static void
13506 do_neon_ld_st_lane (void)
13507 {
13508 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13509 int align_good, do_align = 0;
13510 int logsize = neon_logbits (et.size);
13511 int align = inst.operands[1].imm >> 8;
13512 int n = (inst.instruction >> 8) & 3;
13513 int max_el = 64 / et.size;
13514
13515 if (et.type == NT_invtype)
13516 return;
13517
13518 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13519 _("bad list length"));
13520 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13521 _("scalar index out of range"));
13522 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13523 && et.size == 8,
13524 _("stride of 2 unavailable when element size is 8"));
13525
13526 switch (n)
13527 {
13528 case 0: /* VLD1 / VST1. */
13529 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13530 32, 32, -1);
13531 if (align_good == FAIL)
13532 return;
13533 if (do_align)
13534 {
13535 unsigned alignbits = 0;
13536 switch (et.size)
13537 {
13538 case 16: alignbits = 0x1; break;
13539 case 32: alignbits = 0x3; break;
13540 default: ;
13541 }
13542 inst.instruction |= alignbits << 4;
13543 }
13544 break;
13545
13546 case 1: /* VLD2 / VST2. */
13547 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13548 32, 64, -1);
13549 if (align_good == FAIL)
13550 return;
13551 if (do_align)
13552 inst.instruction |= 1 << 4;
13553 break;
13554
13555 case 2: /* VLD3 / VST3. */
13556 constraint (inst.operands[1].immisalign,
13557 _("can't use alignment with this instruction"));
13558 break;
13559
13560 case 3: /* VLD4 / VST4. */
13561 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13562 16, 64, 32, 64, 32, 128, -1);
13563 if (align_good == FAIL)
13564 return;
13565 if (do_align)
13566 {
13567 unsigned alignbits = 0;
13568 switch (et.size)
13569 {
13570 case 8: alignbits = 0x1; break;
13571 case 16: alignbits = 0x1; break;
13572 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13573 default: ;
13574 }
13575 inst.instruction |= alignbits << 4;
13576 }
13577 break;
13578
13579 default: ;
13580 }
13581
13582 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13583 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13584 inst.instruction |= 1 << (4 + logsize);
13585
13586 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13587 inst.instruction |= logsize << 10;
13588 }
13589
13590 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13591
13592 static void
13593 do_neon_ld_dup (void)
13594 {
13595 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13596 int align_good, do_align = 0;
13597
13598 if (et.type == NT_invtype)
13599 return;
13600
13601 switch ((inst.instruction >> 8) & 3)
13602 {
13603 case 0: /* VLD1. */
13604 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13605 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13606 &do_align, 16, 16, 32, 32, -1);
13607 if (align_good == FAIL)
13608 return;
13609 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13610 {
13611 case 1: break;
13612 case 2: inst.instruction |= 1 << 5; break;
13613 default: first_error (_("bad list length")); return;
13614 }
13615 inst.instruction |= neon_logbits (et.size) << 6;
13616 break;
13617
13618 case 1: /* VLD2. */
13619 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13620 &do_align, 8, 16, 16, 32, 32, 64, -1);
13621 if (align_good == FAIL)
13622 return;
13623 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13624 _("bad list length"));
13625 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13626 inst.instruction |= 1 << 5;
13627 inst.instruction |= neon_logbits (et.size) << 6;
13628 break;
13629
13630 case 2: /* VLD3. */
13631 constraint (inst.operands[1].immisalign,
13632 _("can't use alignment with this instruction"));
13633 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13634 _("bad list length"));
13635 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13636 inst.instruction |= 1 << 5;
13637 inst.instruction |= neon_logbits (et.size) << 6;
13638 break;
13639
13640 case 3: /* VLD4. */
13641 {
13642 int align = inst.operands[1].imm >> 8;
13643 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13644 16, 64, 32, 64, 32, 128, -1);
13645 if (align_good == FAIL)
13646 return;
13647 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13648 _("bad list length"));
13649 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13650 inst.instruction |= 1 << 5;
13651 if (et.size == 32 && align == 128)
13652 inst.instruction |= 0x3 << 6;
13653 else
13654 inst.instruction |= neon_logbits (et.size) << 6;
13655 }
13656 break;
13657
13658 default: ;
13659 }
13660
13661 inst.instruction |= do_align << 4;
13662 }
13663
13664 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13665    apart from bits [11:4]). */
13666
13667 static void
13668 do_neon_ldx_stx (void)
13669 {
13670 switch (NEON_LANE (inst.operands[0].imm))
13671 {
13672 case NEON_INTERLEAVE_LANES:
13673 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13674 do_neon_ld_st_interleave ();
13675 break;
13676
13677 case NEON_ALL_LANES:
13678 inst.instruction = NEON_ENC_DUP (inst.instruction);
13679 do_neon_ld_dup ();
13680 break;
13681
13682 default:
13683 inst.instruction = NEON_ENC_LANE (inst.instruction);
13684 do_neon_ld_st_lane ();
13685 }
13686
13687 /* L bit comes from bit mask. */
13688 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13689 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13690 inst.instruction |= inst.operands[1].reg << 16;
13691
13692 if (inst.operands[1].postind)
13693 {
13694 int postreg = inst.operands[1].imm & 0xf;
13695 constraint (!inst.operands[1].immisreg,
13696 _("post-index must be a register"));
13697 constraint (postreg == 0xd || postreg == 0xf,
13698 _("bad register for post-index"));
13699 inst.instruction |= postreg;
13700 }
13701 else if (inst.operands[1].writeback)
13702 {
13703 inst.instruction |= 0xd;
13704 }
13705 else
13706 inst.instruction |= 0xf;
13707
13708 if (thumb_mode)
13709 inst.instruction |= 0xf9000000;
13710 else
13711 inst.instruction |= 0xf4000000;
13712 }
13713
13714 \f
13715 /* Overall per-instruction processing. */
13716
13717 /* We need to be able to fix up arbitrary expressions in some statements.
13718 This is so that we can handle symbols that are an arbitrary distance from
13719 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13720 which returns part of an address in a form which will be valid for
13721 a data instruction. We do this by pushing the expression into a symbol
13722 in the expr_section, and creating a fix for that. */
13723
13724 static void
13725 fix_new_arm (fragS * frag,
13726 int where,
13727 short int size,
13728 expressionS * exp,
13729 int pc_rel,
13730 int reloc)
13731 {
13732 fixS * new_fix;
13733
13734 switch (exp->X_op)
13735 {
13736 case O_constant:
13737 case O_symbol:
13738 case O_add:
13739 case O_subtract:
13740 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13741 break;
13742
13743 default:
13744 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13745 pc_rel, reloc);
13746 break;
13747 }
13748
13749 /* Mark whether the fix is to a THUMB instruction, or an ARM
13750 instruction. */
13751 new_fix->tc_fix_data = thumb_mode;
13752 }
13753
13754 /* Create a frag for an instruction requiring relaxation. */
13755 static void
13756 output_relax_insn (void)
13757 {
13758 char * to;
13759 symbolS *sym;
13760 int offset;
13761
13762 /* The size of the instruction is unknown, so tie the debug info to the
13763 start of the instruction. */
13764 dwarf2_emit_insn (0);
13765
13766 switch (inst.reloc.exp.X_op)
13767 {
13768 case O_symbol:
13769 sym = inst.reloc.exp.X_add_symbol;
13770 offset = inst.reloc.exp.X_add_number;
13771 break;
13772 case O_constant:
13773 sym = NULL;
13774 offset = inst.reloc.exp.X_add_number;
13775 break;
13776 default:
13777 sym = make_expr_symbol (&inst.reloc.exp);
13778 offset = 0;
13779 break;
13780 }
13781 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13782 inst.relax, sym, offset, NULL/*offset, opcode*/);
13783 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13784 }
13785
13786 /* Write a 32-bit thumb instruction to buf. */
13787 static void
13788 put_thumb32_insn (char * buf, unsigned long insn)
13789 {
13790 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13791 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13792 }
13793
13794 static void
13795 output_inst (const char * str)
13796 {
13797 char * to = NULL;
13798
13799 if (inst.error)
13800 {
13801 as_bad ("%s -- `%s'", inst.error, str);
13802 return;
13803 }
13804 if (inst.relax) {
13805 output_relax_insn();
13806 return;
13807 }
13808 if (inst.size == 0)
13809 return;
13810
13811 to = frag_more (inst.size);
13812
13813 if (thumb_mode && (inst.size > THUMB_SIZE))
13814 {
13815 assert (inst.size == (2 * THUMB_SIZE));
13816 put_thumb32_insn (to, inst.instruction);
13817 }
13818 else if (inst.size > INSN_SIZE)
13819 {
13820 assert (inst.size == (2 * INSN_SIZE));
13821 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13822 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13823 }
13824 else
13825 md_number_to_chars (to, inst.instruction, inst.size);
13826
13827 if (inst.reloc.type != BFD_RELOC_UNUSED)
13828 fix_new_arm (frag_now, to - frag_now->fr_literal,
13829 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13830 inst.reloc.type);
13831
13832 dwarf2_emit_insn (inst.size);
13833 }
13834
13835 /* Tag values used in struct asm_opcode's tag field. */
13836 enum opcode_tag
13837 {
13838 OT_unconditional, /* Instruction cannot be conditionalized.
13839 The ARM condition field is still 0xE. */
13840 OT_unconditionalF, /* Instruction cannot be conditionalized
13841 and carries 0xF in its ARM condition field. */
13842 OT_csuffix, /* Instruction takes a conditional suffix. */
13843 OT_csuffixF, /* Some forms of the instruction take a conditional
13844 suffix, others place 0xF where the condition field
13845 would be. */
13846 OT_cinfix3, /* Instruction takes a conditional infix,
13847 beginning at character index 3. (In
13848 unified mode, it becomes a suffix.) */
13849 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13850 tsts, cmps, cmns, and teqs. */
13851 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13852 character index 3, even in unified mode. Used for
13853 legacy instructions where suffix and infix forms
13854 may be ambiguous. */
13855 OT_csuf_or_in3, /* Instruction takes either a conditional
13856 suffix or an infix at character index 3. */
13857 OT_odd_infix_unc, /* This is the unconditional variant of an
13858 instruction that takes a conditional infix
13859 at an unusual position. In unified mode,
13860 this variant will accept a suffix. */
13861 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13862 are the conditional variants of instructions that
13863 take conditional infixes in unusual positions.
13864 The infix appears at character index
13865 (tag - OT_odd_infix_0). These are not accepted
13866 in unified mode. */
13867 };
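/* For orientation, a few examples of how these tags are used in the opcode
   table below: ordinary data-processing mnemonics such as "and" are entered
   as OT_csuffix, their s-forms such as "ands" as OT_cinfix3, the deprecated
   "tsts"/"cmps"/"cmns"/"teqs" forms as OT_cinfix3_deprecated, and the FPA
   mnemonics as OT_cinfix3_legacy (or OT_csuf_or_in3 for the corner cases).  */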
13868
13869 /* Subroutine of md_assemble, responsible for looking up the primary
13870 opcode from the mnemonic the user wrote. STR points to the
13871 beginning of the mnemonic.
13872
13873 This is not simply a hash table lookup, because of conditional
13874 variants. Most instructions have conditional variants, which are
13875 expressed with a _conditional affix_ to the mnemonic. If we were
13876 to encode each conditional variant as a literal string in the opcode
13877 table, it would have approximately 20,000 entries.
13878
13879 Most mnemonics take this affix as a suffix, and in unified syntax,
13880 'most' is upgraded to 'all'. However, in the divided syntax, some
13881 instructions take the affix as an infix, notably the s-variants of
13882 the arithmetic instructions. Of those instructions, all but six
13883 have the infix appear after the third character of the mnemonic.
13884
13885 Accordingly, the algorithm for looking up primary opcodes given
13886 an identifier is:
13887
13888 1. Look up the identifier in the opcode table.
13889 If we find a match, go to step U.
13890
13891 2. Look up the last two characters of the identifier in the
13892 conditions table. If we find a match, look up the first N-2
13893 characters of the identifier in the opcode table. If we
13894 find a match, go to step CE.
13895
13896 3. Look up the fourth and fifth characters of the identifier in
13897 the conditions table. If we find a match, extract those
13898 characters from the identifier, and look up the remaining
13899 characters in the opcode table. If we find a match, go
13900 to step CM.
13901
13902 4. Fail.
13903
13904 U. Examine the tag field of the opcode structure, in case this is
13905 one of the six instructions with its conditional infix in an
13906 unusual place. If it is, the tag tells us where to find the
13907 infix; look it up in the conditions table and set inst.cond
13908 accordingly. Otherwise, this is an unconditional instruction.
13909 Again set inst.cond accordingly. Return the opcode structure.
13910
13911 CE. Examine the tag field to make sure this is an instruction that
13912 should receive a conditional suffix. If it is not, fail.
13913 Otherwise, set inst.cond from the suffix we already looked up,
13914 and return the opcode structure.
13915
13916 CM. Examine the tag field to make sure this is an instruction that
13917 should receive a conditional infix after the third character.
13918 If it is not, fail. Otherwise, undo the edits to the current
13919 line of input and proceed as for case CE. */
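/* Worked examples (illustrative): "teq" is found directly by step 1, even
   though it happens to end in "eq".  "addeq" fails step 1, but "eq" is in
   the condition table and "add" is in the opcode table, so step 2 leads to
   case CE.  In divided syntax, "ldmeqfd" reaches step 3: removing "eq" from
   the fourth and fifth positions leaves "ldmfd", an OT_cinfix3 entry, and
   case CM applies.  */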
13920
13921 static const struct asm_opcode *
13922 opcode_lookup (char **str)
13923 {
13924 char *end, *base;
13925 char *affix;
13926 const struct asm_opcode *opcode;
13927 const struct asm_cond *cond;
13928 char save[2];
13929 bfd_boolean neon_supported;
13930
13931 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13932
13933 /* Scan up to the end of the mnemonic, which must end in white space,
13934 '.' (in unified mode, or for Neon instructions), or end of string. */
13935 for (base = end = *str; *end != '\0'; end++)
13936 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13937 break;
13938
13939 if (end == base)
13940 return 0;
13941
13942 /* Handle a possible width suffix and/or Neon type suffix. */
13943 if (end[0] == '.')
13944 {
13945 int offset = 2;
13946
13947 /* The .w and .n suffixes are only valid if the unified syntax is in
13948 use. */
13949 if (unified_syntax && end[1] == 'w')
13950 inst.size_req = 4;
13951 else if (unified_syntax && end[1] == 'n')
13952 inst.size_req = 2;
13953 else
13954 offset = 0;
13955
13956 inst.vectype.elems = 0;
13957
13958 *str = end + offset;
13959
13960 if (end[offset] == '.')
13961 {
13962 /* See if we have a Neon type suffix (possible in either unified or
13963 non-unified ARM syntax mode). */
13964 if (parse_neon_type (&inst.vectype, str) == FAIL)
13965 return 0;
13966 }
13967 else if (end[offset] != '\0' && end[offset] != ' ')
13968 return 0;
13969 }
13970 else
13971 *str = end;
13972
13973 /* Look for unaffixed or special-case affixed mnemonic. */
13974 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13975 if (opcode)
13976 {
13977 /* step U */
13978 if (opcode->tag < OT_odd_infix_0)
13979 {
13980 inst.cond = COND_ALWAYS;
13981 return opcode;
13982 }
13983
13984 if (unified_syntax)
13985 as_warn (_("conditional infixes are deprecated in unified syntax"));
13986 affix = base + (opcode->tag - OT_odd_infix_0);
13987 cond = hash_find_n (arm_cond_hsh, affix, 2);
13988 assert (cond);
13989
13990 inst.cond = cond->value;
13991 return opcode;
13992 }
13993
13994   /* Cannot have a conditional suffix on a mnemonic of fewer than three
13995      characters (at least one base character plus the two-character suffix). */
13996 if (end - base < 3)
13997 return 0;
13998
13999 /* Look for suffixed mnemonic. */
14000 affix = end - 2;
14001 cond = hash_find_n (arm_cond_hsh, affix, 2);
14002 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
14003 if (opcode && cond)
14004 {
14005 /* step CE */
14006 switch (opcode->tag)
14007 {
14008 case OT_cinfix3_legacy:
14009 /* Ignore conditional suffixes matched on infix only mnemonics. */
14010 break;
14011
14012 case OT_cinfix3:
14013 case OT_cinfix3_deprecated:
14014 case OT_odd_infix_unc:
14015 if (!unified_syntax)
14016 return 0;
14017 /* else fall through */
14018
14019 case OT_csuffix:
14020 case OT_csuffixF:
14021 case OT_csuf_or_in3:
14022 inst.cond = cond->value;
14023 return opcode;
14024
14025 case OT_unconditional:
14026 case OT_unconditionalF:
14027 if (thumb_mode)
14028 {
14029 inst.cond = cond->value;
14030 }
14031 else
14032 {
14033 /* delayed diagnostic */
14034 inst.error = BAD_COND;
14035 inst.cond = COND_ALWAYS;
14036 }
14037 return opcode;
14038
14039 default:
14040 return 0;
14041 }
14042 }
14043
14044 /* Cannot have a usual-position infix on a mnemonic of less than
14045 six characters (five would be a suffix). */
14046 if (end - base < 6)
14047 return 0;
14048
14049 /* Look for infixed mnemonic in the usual position. */
14050 affix = base + 3;
14051 cond = hash_find_n (arm_cond_hsh, affix, 2);
14052 if (!cond)
14053 return 0;
14054
14055 memcpy (save, affix, 2);
14056 memmove (affix, affix + 2, (end - affix) - 2);
14057 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14058 memmove (affix + 2, affix, (end - affix) - 2);
14059 memcpy (affix, save, 2);
14060
14061 if (opcode
14062 && (opcode->tag == OT_cinfix3
14063 || opcode->tag == OT_cinfix3_deprecated
14064 || opcode->tag == OT_csuf_or_in3
14065 || opcode->tag == OT_cinfix3_legacy))
14066 {
14067 /* step CM */
14068 if (unified_syntax
14069 && (opcode->tag == OT_cinfix3
14070 || opcode->tag == OT_cinfix3_deprecated))
14071 as_warn (_("conditional infixes are deprecated in unified syntax"));
14072
14073 inst.cond = cond->value;
14074 return opcode;
14075 }
14076
14077 return 0;
14078 }
14079
14080 void
14081 md_assemble (char *str)
14082 {
14083 char *p = str;
14084 const struct asm_opcode * opcode;
14085
14086 /* Align the previous label if needed. */
14087 if (last_label_seen != NULL)
14088 {
14089 symbol_set_frag (last_label_seen, frag_now);
14090 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14091 S_SET_SEGMENT (last_label_seen, now_seg);
14092 }
14093
14094 memset (&inst, '\0', sizeof (inst));
14095 inst.reloc.type = BFD_RELOC_UNUSED;
14096
14097 opcode = opcode_lookup (&p);
14098 if (!opcode)
14099 {
14100 /* It wasn't an instruction, but it might be a register alias of
14101 the form alias .req reg, or a Neon .dn/.qn directive. */
14102 if (!create_register_alias (str, p)
14103 && !create_neon_reg_alias (str, p))
14104 as_bad (_("bad instruction `%s'"), str);
14105
14106 return;
14107 }
14108
14109 if (opcode->tag == OT_cinfix3_deprecated)
14110 as_warn (_("s suffix on comparison instruction is deprecated"));
14111
14112 /* The value which unconditional instructions should have in place of the
14113 condition field. */
14114 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14115
14116 if (thumb_mode)
14117 {
14118 arm_feature_set variant;
14119
14120 variant = cpu_variant;
14121 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14122 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14123 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14124 /* Check that this instruction is supported for this CPU. */
14125 if (!opcode->tvariant
14126 || (thumb_mode == 1
14127 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14128 {
14129 as_bad (_("selected processor does not support `%s'"), str);
14130 return;
14131 }
14132 if (inst.cond != COND_ALWAYS && !unified_syntax
14133 && opcode->tencode != do_t_branch)
14134 {
14135 as_bad (_("Thumb does not support conditional execution"));
14136 return;
14137 }
14138
14139 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
14140 {
14141 	  /* Implicitly require narrow instructions on Thumb-1.  This avoids
14142 relaxation accidentally introducing Thumb-2 instructions. */
14143 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23)
14144 inst.size_req = 2;
14145 }
14146
14147 /* Check conditional suffixes. */
14148 if (current_it_mask)
14149 {
14150 int cond;
14151 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14152 current_it_mask <<= 1;
14153 current_it_mask &= 0x1f;
14154 /* The BKPT instruction is unconditional even in an IT block. */
14155 if (!inst.error
14156 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14157 {
14158 as_bad (_("incorrect condition in IT block"));
14159 return;
14160 }
14161 }
14162 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14163 {
14164 as_bad (_("thumb conditional instrunction not in IT block"));
14165 return;
14166 }
14167
14168 mapping_state (MAP_THUMB);
14169 inst.instruction = opcode->tvalue;
14170
14171 if (!parse_operands (p, opcode->operands))
14172 opcode->tencode ();
14173
14174 /* Clear current_it_mask at the end of an IT block. */
14175 if (current_it_mask == 0x10)
14176 current_it_mask = 0;
14177
14178 if (!(inst.error || inst.relax))
14179 {
14180 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14181 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14182 if (inst.size_req && inst.size_req != inst.size)
14183 {
14184 as_bad (_("cannot honor width suffix -- `%s'"), str);
14185 return;
14186 }
14187 }
14188
14189 /* Something has gone badly wrong if we try to relax a fixed size
14190 instruction. */
14191 assert (inst.size_req == 0 || !inst.relax);
14192
14193 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14194 *opcode->tvariant);
14195 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14196 	 set those bits when a Thumb-2 32-bit instruction is seen, i.e.
14197 anything other than bl/blx.
14198 This is overly pessimistic for relaxable instructions. */
14199 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14200 || inst.relax)
14201 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14202 arm_ext_v6t2);
14203 }
14204 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14205 {
14206 /* Check that this instruction is supported for this CPU. */
14207 if (!opcode->avariant ||
14208 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14209 {
14210 as_bad (_("selected processor does not support `%s'"), str);
14211 return;
14212 }
14213 if (inst.size_req)
14214 {
14215 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14216 return;
14217 }
14218
14219 mapping_state (MAP_ARM);
14220 inst.instruction = opcode->avalue;
14221 if (opcode->tag == OT_unconditionalF)
14222 inst.instruction |= 0xF << 28;
14223 else
14224 inst.instruction |= inst.cond << 28;
14225 inst.size = INSN_SIZE;
14226 if (!parse_operands (p, opcode->operands))
14227 opcode->aencode ();
14228 /* Arm mode bx is marked as both v4T and v5 because it's still required
14229 on a hypothetical non-thumb v5 core. */
14230 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14231 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14232 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14233 else
14234 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14235 *opcode->avariant);
14236 }
14237 else
14238 {
14239 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14240 "-- `%s'"), str);
14241 return;
14242 }
14243 output_inst (str);
14244 }
14245
14246 /* Various frobbings of labels and their addresses. */
14247
14248 void
14249 arm_start_line_hook (void)
14250 {
14251 last_label_seen = NULL;
14252 }
14253
14254 void
14255 arm_frob_label (symbolS * sym)
14256 {
14257 last_label_seen = sym;
14258
14259 ARM_SET_THUMB (sym, thumb_mode);
14260
14261 #if defined OBJ_COFF || defined OBJ_ELF
14262 ARM_SET_INTERWORK (sym, support_interwork);
14263 #endif
14264
14265 /* Note - do not allow local symbols (.Lxxx) to be labeled
14266 as Thumb functions. This is because these labels, whilst
14267 they exist inside Thumb code, are not the entry points for
14268 possible ARM->Thumb calls. Also, these labels can be used
14269      as part of a computed goto or switch statement.  E.g. gcc
14270 can generate code that looks like this:
14271
14272 ldr r2, [pc, .Laaa]
14273 lsl r3, r3, #2
14274 ldr r2, [r3, r2]
14275 mov pc, r2
14276
14277 .Lbbb: .word .Lxxx
14278 .Lccc: .word .Lyyy
14279 ..etc...
14280 	   .Laaa: .word .Lbbb
14281
14282 The first instruction loads the address of the jump table.
14283 The second instruction converts a table index into a byte offset.
14284 The third instruction gets the jump address out of the table.
14285 The fourth instruction performs the jump.
14286
14287 If the address stored at .Laaa is that of a symbol which has the
14288 Thumb_Func bit set, then the linker will arrange for this address
14289 to have the bottom bit set, which in turn would mean that the
14290 address computation performed by the third instruction would end
14291 up with the bottom bit set. Since the ARM is capable of unaligned
14292 word loads, the instruction would then load the incorrect address
14293 out of the jump table, and chaos would ensue. */
14294 if (label_is_thumb_function_name
14295 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14296 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14297 {
14298 /* When the address of a Thumb function is taken the bottom
14299 bit of that address should be set. This will allow
14300 interworking between Arm and Thumb functions to work
14301 correctly. */
14302
14303 THUMB_SET_FUNC (sym, 1);
14304
14305 label_is_thumb_function_name = FALSE;
14306 }
14307
14308 dwarf2_emit_label (sym);
14309 }
14310
14311 int
14312 arm_data_in_code (void)
14313 {
14314 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14315 {
14316 *input_line_pointer = '/';
14317 input_line_pointer += 5;
14318 *input_line_pointer = 0;
14319 return 1;
14320 }
14321
14322 return 0;
14323 }
14324
14325 char *
14326 arm_canonicalize_symbol_name (char * name)
14327 {
14328 int len;
14329
14330 if (thumb_mode && (len = strlen (name)) > 5
14331 && streq (name + len - 5, "/data"))
14332 *(name + len - 5) = 0;
14333
14334 return name;
14335 }
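/* Between them, arm_data_in_code and arm_canonicalize_symbol_name implement
   a small marker protocol for Thumb data-in-code labels: the "data:" marker
   is rewritten in place into a "/data" suffix on the symbol name, and that
   suffix is stripped again before the name reaches the symbol table.  */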
14336 \f
14337 /* Table of all register names defined by default. The user can
14338 define additional names with .req. Note that all register names
14339 should appear in both upper and lowercase variants. Some registers
14340 also have mixed-case names. */
14341
14342 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14343 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14344 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14345 #define REGSET(p,t) \
14346 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14347 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14348 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14349 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14350 #define REGSETH(p,t) \
14351 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14352 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14353 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14354 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14355 #define REGSET2(p,t) \
14356 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14357 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14358 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14359 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14360
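/* As an example of the expansion (illustrative only), REGDEF(sp,13,RN) below
   yields the initializer { "sp", 13, REG_TYPE_RN, TRUE, 0 }, and
   REGNUM2(q,1,NQ), as generated by REGSET2(q,NQ), yields
   { "q1", 2, REG_TYPE_NQ, TRUE, 0 }: the doubled number records a Neon quad
   register as its starting double register, since each Q register aliases a
   pair of D registers.  */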
14361 static const struct reg_entry reg_names[] =
14362 {
14363 /* ARM integer registers. */
14364 REGSET(r, RN), REGSET(R, RN),
14365
14366 /* ATPCS synonyms. */
14367 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14368 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14369 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14370
14371 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14372 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14373 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14374
14375 /* Well-known aliases. */
14376 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14377 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14378
14379 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14380 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14381
14382 /* Coprocessor numbers. */
14383 REGSET(p, CP), REGSET(P, CP),
14384
14385 /* Coprocessor register numbers. The "cr" variants are for backward
14386 compatibility. */
14387 REGSET(c, CN), REGSET(C, CN),
14388 REGSET(cr, CN), REGSET(CR, CN),
14389
14390 /* FPA registers. */
14391 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14392 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14393
14394 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14395 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14396
14397 /* VFP SP registers. */
14398 REGSET(s,VFS), REGSET(S,VFS),
14399 REGSETH(s,VFS), REGSETH(S,VFS),
14400
14401 /* VFP DP Registers. */
14402 REGSET(d,VFD), REGSET(D,VFD),
14403 /* Extra Neon DP registers. */
14404 REGSETH(d,VFD), REGSETH(D,VFD),
14405
14406 /* Neon QP registers. */
14407 REGSET2(q,NQ), REGSET2(Q,NQ),
14408
14409 /* VFP control registers. */
14410 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14411 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14412
14413 /* Maverick DSP coprocessor registers. */
14414 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14415 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14416
14417 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14418 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14419 REGDEF(dspsc,0,DSPSC),
14420
14421 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14422 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14423 REGDEF(DSPSC,0,DSPSC),
14424
14425 /* iWMMXt data registers - p0, c0-15. */
14426 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14427
14428 /* iWMMXt control registers - p1, c0-3. */
14429 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14430 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14431 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14432 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14433
14434 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14435 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14436 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14437 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14438 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14439
14440 /* XScale accumulator registers. */
14441 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14442 };
14443 #undef REGDEF
14444 #undef REGNUM
14445 #undef REGSET
14446
14447 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14448 within psr_required_here. */
14449 static const struct asm_psr psrs[] =
14450 {
14451 /* Backward compatibility notation. Note that "all" is no longer
14452 truly all possible PSR bits. */
14453 {"all", PSR_c | PSR_f},
14454 {"flg", PSR_f},
14455 {"ctl", PSR_c},
14456
14457 /* Individual flags. */
14458 {"f", PSR_f},
14459 {"c", PSR_c},
14460 {"x", PSR_x},
14461 {"s", PSR_s},
14462 /* Combinations of flags. */
14463 {"fs", PSR_f | PSR_s},
14464 {"fx", PSR_f | PSR_x},
14465 {"fc", PSR_f | PSR_c},
14466 {"sf", PSR_s | PSR_f},
14467 {"sx", PSR_s | PSR_x},
14468 {"sc", PSR_s | PSR_c},
14469 {"xf", PSR_x | PSR_f},
14470 {"xs", PSR_x | PSR_s},
14471 {"xc", PSR_x | PSR_c},
14472 {"cf", PSR_c | PSR_f},
14473 {"cs", PSR_c | PSR_s},
14474 {"cx", PSR_c | PSR_x},
14475 {"fsx", PSR_f | PSR_s | PSR_x},
14476 {"fsc", PSR_f | PSR_s | PSR_c},
14477 {"fxs", PSR_f | PSR_x | PSR_s},
14478 {"fxc", PSR_f | PSR_x | PSR_c},
14479 {"fcs", PSR_f | PSR_c | PSR_s},
14480 {"fcx", PSR_f | PSR_c | PSR_x},
14481 {"sfx", PSR_s | PSR_f | PSR_x},
14482 {"sfc", PSR_s | PSR_f | PSR_c},
14483 {"sxf", PSR_s | PSR_x | PSR_f},
14484 {"sxc", PSR_s | PSR_x | PSR_c},
14485 {"scf", PSR_s | PSR_c | PSR_f},
14486 {"scx", PSR_s | PSR_c | PSR_x},
14487 {"xfs", PSR_x | PSR_f | PSR_s},
14488 {"xfc", PSR_x | PSR_f | PSR_c},
14489 {"xsf", PSR_x | PSR_s | PSR_f},
14490 {"xsc", PSR_x | PSR_s | PSR_c},
14491 {"xcf", PSR_x | PSR_c | PSR_f},
14492 {"xcs", PSR_x | PSR_c | PSR_s},
14493 {"cfs", PSR_c | PSR_f | PSR_s},
14494 {"cfx", PSR_c | PSR_f | PSR_x},
14495 {"csf", PSR_c | PSR_s | PSR_f},
14496 {"csx", PSR_c | PSR_s | PSR_x},
14497 {"cxf", PSR_c | PSR_x | PSR_f},
14498 {"cxs", PSR_c | PSR_x | PSR_s},
14499 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14500 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14501 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14502 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14503 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14504 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14505 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14506 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14507 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14508 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14509 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14510 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14511 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14512 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14513 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14514 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14515 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14516 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14517 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14518 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14519 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14520 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14521 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14522 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14523 };
14524
14525 /* Table of V7M psr names. */
14526 static const struct asm_psr v7m_psrs[] =
14527 {
14528 {"apsr", 0 }, {"APSR", 0 },
14529 {"iapsr", 1 }, {"IAPSR", 1 },
14530 {"eapsr", 2 }, {"EAPSR", 2 },
14531 {"psr", 3 }, {"PSR", 3 },
14532 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
14533 {"ipsr", 5 }, {"IPSR", 5 },
14534 {"epsr", 6 }, {"EPSR", 6 },
14535 {"iepsr", 7 }, {"IEPSR", 7 },
14536 {"msp", 8 }, {"MSP", 8 },
14537 {"psp", 9 }, {"PSP", 9 },
14538 {"primask", 16}, {"PRIMASK", 16},
14539 {"basepri", 17}, {"BASEPRI", 17},
14540 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
14541 {"faultmask", 19}, {"FAULTMASK", 19},
14542 {"control", 20}, {"CONTROL", 20}
14543 };
14544
14545 /* Table of all shift-in-operand names. */
14546 static const struct asm_shift_name shift_names [] =
14547 {
14548 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14549 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14550 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14551 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14552 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14553 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14554 };
14555
14556 /* Table of all explicit relocation names. */
14557 #ifdef OBJ_ELF
14558 static struct reloc_entry reloc_names[] =
14559 {
14560 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14561 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14562 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14563 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14564 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14565 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14566 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14567 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14568 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14569 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14570 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14571 };
14572 #endif
14573
14574 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14575 static const struct asm_cond conds[] =
14576 {
14577 {"eq", 0x0},
14578 {"ne", 0x1},
14579 {"cs", 0x2}, {"hs", 0x2},
14580 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14581 {"mi", 0x4},
14582 {"pl", 0x5},
14583 {"vs", 0x6},
14584 {"vc", 0x7},
14585 {"hi", 0x8},
14586 {"ls", 0x9},
14587 {"ge", 0xa},
14588 {"lt", 0xb},
14589 {"gt", 0xc},
14590 {"le", 0xd},
14591 {"al", 0xe}
14592 };
14593
14594 static struct asm_barrier_opt barrier_opt_names[] =
14595 {
14596 { "sy", 0xf },
14597 { "un", 0x7 },
14598 { "st", 0xe },
14599 { "unst", 0x6 }
14600 };
14601
14602 /* Table of ARM-format instructions. */
14603
14604 /* Macros for gluing together operand strings. N.B. In all cases
14605 other than OPS0, the trailing OP_stop comes from default
14606 zero-initialization of the unspecified elements of the array. */
14607 #define OPS0() { OP_stop, }
14608 #define OPS1(a) { OP_##a, }
14609 #define OPS2(a,b) { OP_##a,OP_##b, }
14610 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14611 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14612 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14613 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14614
14615 /* These macros abstract out the exact format of the mnemonic table and
14616 save some repeated characters. */
14617
14618 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14619 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14620 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14621 THUMB_VARIANT, do_##ae, do_##te }
14622
14623 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14624 a T_MNEM_xyz enumerator. */
14625 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14626 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14627 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14628 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14629
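/* For instance (purely illustrative), the entry
     tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c)
   in the table below expands through TxCE and OPS3 to roughly
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }
   with the remaining operand slots left as OP_stop by zero-initialization.  */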
14630 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14631 infix after the third character. */
14632 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14633 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14634 THUMB_VARIANT, do_##ae, do_##te }
14635 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14636 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14637 THUMB_VARIANT, do_##ae, do_##te }
14638 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14639 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14640 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14641 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14642 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14643 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14644 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14645 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14646
14647 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14648    appear in the opcode table. */
14649 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14650 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14651 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14652
14653 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14654 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14655 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14656 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14657 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14658 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14659 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14660 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14661 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14662 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14663 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14664 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14665 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14666 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14667 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14668 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14669 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14670 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14671 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14672 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14673
14674 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14675 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14676 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14677 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14678
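/* So a single line such as tCM(ld,sh, ...) in the table below expands into
   nineteen opcode entries ("ldsh", "ldeqsh", "ldnesh", and so on through
   "ldalsh"): the bare form is tagged OT_odd_infix_unc and the conditional
   forms OT_odd_infix_0 plus the infix position.  */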
14679 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14680 field is still 0xE. Many of the Thumb variants can be executed
14681 conditionally, so this is checked separately. */
14682 #define TUE(mnem, op, top, nops, ops, ae, te) \
14683 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14684 THUMB_VARIANT, do_##ae, do_##te }
14685
14686 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14687 condition code field. */
14688 #define TUF(mnem, op, top, nops, ops, ae, te) \
14689 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14690 THUMB_VARIANT, do_##ae, do_##te }
14691
14692 /* ARM-only variants of all the above. */
14693 #define CE(mnem, op, nops, ops, ae) \
14694 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14695
14696 #define C3(mnem, op, nops, ops, ae) \
14697 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14698
14699 /* Legacy mnemonics that always have conditional infix after the third
14700 character. */
14701 #define CL(mnem, op, nops, ops, ae) \
14702 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14703 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14704
14705 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14706 #define cCE(mnem, op, nops, ops, ae) \
14707 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14708
14709 /* Legacy coprocessor instructions where conditional infix and conditional
14710 suffix are ambiguous. For consistency this includes all FPA instructions,
14711 not just the potentially ambiguous ones. */
14712 #define cCL(mnem, op, nops, ops, ae) \
14713 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14714 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14715
14716 /* Coprocessor, takes either a suffix or a position-3 infix
14717 (for an FPA corner case). */
14718 #define C3E(mnem, op, nops, ops, ae) \
14719 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14720 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14721
14722 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14723 { #m1 #m2 #m3, OPS##nops ops, \
14724 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14725 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14726
14727 #define CM(m1, m2, op, nops, ops, ae) \
14728 xCM_(m1, , m2, op, nops, ops, ae), \
14729 xCM_(m1, eq, m2, op, nops, ops, ae), \
14730 xCM_(m1, ne, m2, op, nops, ops, ae), \
14731 xCM_(m1, cs, m2, op, nops, ops, ae), \
14732 xCM_(m1, hs, m2, op, nops, ops, ae), \
14733 xCM_(m1, cc, m2, op, nops, ops, ae), \
14734 xCM_(m1, ul, m2, op, nops, ops, ae), \
14735 xCM_(m1, lo, m2, op, nops, ops, ae), \
14736 xCM_(m1, mi, m2, op, nops, ops, ae), \
14737 xCM_(m1, pl, m2, op, nops, ops, ae), \
14738 xCM_(m1, vs, m2, op, nops, ops, ae), \
14739 xCM_(m1, vc, m2, op, nops, ops, ae), \
14740 xCM_(m1, hi, m2, op, nops, ops, ae), \
14741 xCM_(m1, ls, m2, op, nops, ops, ae), \
14742 xCM_(m1, ge, m2, op, nops, ops, ae), \
14743 xCM_(m1, lt, m2, op, nops, ops, ae), \
14744 xCM_(m1, gt, m2, op, nops, ops, ae), \
14745 xCM_(m1, le, m2, op, nops, ops, ae), \
14746 xCM_(m1, al, m2, op, nops, ops, ae)
14747
14748 #define UE(mnem, op, nops, ops, ae) \
14749 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14750
14751 #define UF(mnem, op, nops, ops, ae) \
14752 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14753
14754 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14755 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14756 use the same encoding function for each. */
14757 #define NUF(mnem, op, nops, ops, enc) \
14758 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14759 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14760
14761 /* Neon data processing, version which indirects through neon_enc_tab for
14762 the various overloaded versions of opcodes. */
14763 #define nUF(mnem, op, nops, ops, enc) \
14764 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14765 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14766
14767 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14768 version. */
14769 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14770 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14771 THUMB_VARIANT, do_##enc, do_##enc }
14772
14773 #define NCE(mnem, op, nops, ops, enc) \
14774 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14775
14776 #define NCEF(mnem, op, nops, ops, enc) \
14777 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14778
14779 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14780 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14781 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14782 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14783
14784 #define nCE(mnem, op, nops, ops, enc) \
14785 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14786
14787 #define nCEF(mnem, op, nops, ops, enc) \
14788 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14789
14790 #define do_0 0
14791
14792 /* Thumb-only, unconditional. */
14793 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14794
14795 static const struct asm_opcode insns[] =
14796 {
14797 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14798 #define THUMB_VARIANT &arm_ext_v4t
14799 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14800 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14801 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14802 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14803 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14804 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14805 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14806 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14807 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14808 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14809 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14810 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14811 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14812 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14813 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14814 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14815
14816 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14817 for setting PSR flag bits. They are obsolete in V6 and do not
14818 have Thumb equivalents. */
14819 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14820 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14821 CL(tstp, 110f000, 2, (RR, SH), cmp),
14822 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14823 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14824 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14825 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14826 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14827 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14828
14829 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14830 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14831 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14832 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14833
14834 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14835 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14836 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14837 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14838
14839 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14840 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14841 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14842 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14843 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14844 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14845
14846 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14847 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14848 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14849 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14850
14851 /* Pseudo ops. */
14852 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14853 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14854 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14855
14856 /* Thumb-compatibility pseudo ops. */
14857 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14858 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14859 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14860 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14861 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14862 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14863 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14864 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14865 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14866 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14867 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14868 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14869
14870 /* These may simplify to neg. */
14871 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14872 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14873
14874 #undef THUMB_VARIANT
14875 #define THUMB_VARIANT &arm_ext_v6
14876 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14877
14878 /* V1 instructions with no Thumb analogue prior to V6T2. */
14879 #undef THUMB_VARIANT
14880 #define THUMB_VARIANT &arm_ext_v6t2
14881 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14882 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14883 CL(teqp, 130f000, 2, (RR, SH), cmp),
14884
14885 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14886 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14887 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14888 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14889
14890 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14891 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14892
14893 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14894 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14895
14896 /* V1 instructions with no Thumb analogue at all. */
14897 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14898 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14899
14900 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14901 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14902 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14903 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14904 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14905 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14906 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14907 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14908
14909 #undef ARM_VARIANT
14910 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14911 #undef THUMB_VARIANT
14912 #define THUMB_VARIANT &arm_ext_v4t
14913 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14914 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14915
14916 #undef THUMB_VARIANT
14917 #define THUMB_VARIANT &arm_ext_v6t2
14918 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14919 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14920
14921 /* Generic coprocessor instructions. */
14922 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14923 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14924 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14925 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14926 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14927 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14928 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14929
14930 #undef ARM_VARIANT
14931 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14932 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14933 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14934
14935 #undef ARM_VARIANT
14936 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14937 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14938 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14939
14940 #undef ARM_VARIANT
14941 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14942 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14943 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14944 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14945 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14946 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14947 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14948 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14949 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14950
14951 #undef ARM_VARIANT
14952 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14953 #undef THUMB_VARIANT
14954 #define THUMB_VARIANT &arm_ext_v4t
14955 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14956 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14957 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14958 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14959 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14960 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14961
14962 #undef ARM_VARIANT
14963 #define ARM_VARIANT &arm_ext_v4t_5
14964 /* ARM Architecture 4T. */
14965 /* Note: bx (and blx) are required on V5, even if the processor does
14966 not support Thumb. */
14967 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14968
14969 #undef ARM_VARIANT
14970 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14971 #undef THUMB_VARIANT
14972 #define THUMB_VARIANT &arm_ext_v5t
14973 /* Note: blx has 2 variants; the .value coded here is for
14974 BLX(2). Only this variant has conditional execution. */
14975 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14976 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14977
14978 #undef THUMB_VARIANT
14979 #define THUMB_VARIANT &arm_ext_v6t2
14980 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14981 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14982 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14983 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14984 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14985 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14986 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14987 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14988
14989 #undef ARM_VARIANT
14990 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14991 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14992 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14993 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14994 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14995
14996 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14997 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14998
14999 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15000 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15001 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15002 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15003
15004 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15005 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15006 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15007 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15008
15009 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15010 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15011
15012 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15013 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15014 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15015 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15016
15017 #undef ARM_VARIANT
15018 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15019 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
15020 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15021 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15022
15023 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15024 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15025
15026 #undef ARM_VARIANT
15027 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15028 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
15029
15030 #undef ARM_VARIANT
15031 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15032 #undef THUMB_VARIANT
15033 #define THUMB_VARIANT &arm_ext_v6
15034 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
15035 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
15036 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15037 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15038 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15039 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15040 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15041 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15042 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15043 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15044
15045 #undef THUMB_VARIANT
15046 #define THUMB_VARIANT &arm_ext_v6t2
15047 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15048 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15049 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15050
15051 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15052 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15053
15054 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15055 #undef THUMB_VARIANT
15056 #define THUMB_VARIANT &arm_ext_v6_notm
15057 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15058 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15059 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15060 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15061 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15062 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15063 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15064 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15065 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15066 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15067 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15068 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15069 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15070 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15071 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15072 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15073 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15074 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15075 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15076 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15077 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15078 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15079 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15080 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15081 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15082 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15083 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15084 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15085 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15086 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15087 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15088 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15089 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15090 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15091 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15092 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15093 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15094 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15095 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15096 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15097 UF(rfeib, 9900a00, 1, (RRw), rfe),
15098 UF(rfeda, 8100a00, 1, (RRw), rfe),
15099 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15100 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15101 UF(rfefa, 9900a00, 1, (RRw), rfe),
15102 UF(rfeea, 8100a00, 1, (RRw), rfe),
15103 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15104 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15105 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15106 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15107 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15108 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15109 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15110 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15111 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15112 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15113 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15114 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15115 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15116 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15117 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15118 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15119 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15120 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15121 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15122 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15123 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15124 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15125 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15126 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15127 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15128 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15129 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15130 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15131 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15132 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15133 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15134 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15135 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15136 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15137 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15138 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15139 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15140 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15141
15142 #undef ARM_VARIANT
15143 #define ARM_VARIANT &arm_ext_v6k
15144 #undef THUMB_VARIANT
15145 #define THUMB_VARIANT &arm_ext_v6k
15146 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15147 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15148 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15149 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15150
15151 #undef THUMB_VARIANT
15152 #define THUMB_VARIANT &arm_ext_v6_notm
15153 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15154 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15155
15156 #undef THUMB_VARIANT
15157 #define THUMB_VARIANT &arm_ext_v6t2
15158 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15159 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15160 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15161 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15162 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15163
15164 #undef ARM_VARIANT
15165 #define ARM_VARIANT &arm_ext_v6z
15166 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15167
15168 #undef ARM_VARIANT
15169 #define ARM_VARIANT &arm_ext_v6t2
15170 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15171 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15172 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15173 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15174
15175 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15176 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15177 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15178 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15179
15180 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15181 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15182 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15183 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15184
15185 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15186 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15187 /* ARM does not really have an IT instruction, so always allow it. */
15188 #undef ARM_VARIANT
15189 #define ARM_VARIANT &arm_ext_v1
15190 TUE(it, 0, bf08, 1, (COND), it, t_it),
15191 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15192 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15193 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15194 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15195 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15196 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15197 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15198 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15199 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15200 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15201 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15202 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15203 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15204 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15205
15206 /* Thumb-2 only instructions. */
15207 #undef ARM_VARIANT
15208 #define ARM_VARIANT NULL
15209
15210 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15211 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15212 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15213 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15214
15215 /* Thumb-2 hardware division instructions (R and M profiles only). */
15216 #undef THUMB_VARIANT
15217 #define THUMB_VARIANT &arm_ext_div
15218 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15219 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15220
15221 /* ARM V7 instructions. */
15222 #undef ARM_VARIANT
15223 #define ARM_VARIANT &arm_ext_v7
15224 #undef THUMB_VARIANT
15225 #define THUMB_VARIANT &arm_ext_v7
15226 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15227 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15228 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15229 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15230 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15231
15232 #undef ARM_VARIANT
15233 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15234 cCE(wfs, e200110, 1, (RR), rd),
15235 cCE(rfs, e300110, 1, (RR), rd),
15236 cCE(wfc, e400110, 1, (RR), rd),
15237 cCE(rfc, e500110, 1, (RR), rd),
15238
15239 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15240 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15241 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15242 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15243
15244 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15245 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15246 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15247 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15248
15249 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15250 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15251 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15252 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15253 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15254 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15255 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15256 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15257 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15258 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15259 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15260 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15261
15262 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15263 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15264 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15265 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15266 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15267 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15268 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15269 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15270 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15271 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15272 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15273 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15274
15275 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15276 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15277 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15278 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15279 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15280 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15281 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15282 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15283 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15284 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15285 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15286 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15287
15288 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15289 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15290 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15291 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15292 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15293 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15294 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15295 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15296 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15297 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15298 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15299 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15300
15301 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15302 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15303 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15304 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15305 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15306 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15307 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15308 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15309 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15310 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15311 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15312 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15313
15314 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15315 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15316 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15317 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15318 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15319 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15320 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15321 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15322 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15323 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15324 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15325 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15326
15327 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15328 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15329 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15330 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15331 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15332 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15333 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15334 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15335 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15336 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15337 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15338 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15339
15340 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15341 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15342 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15343 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15344 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15345 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15346 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15347 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15348 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15349 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15350 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15351 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm), /* Yes, this is expdz, not expez. */
15352
15353 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15354 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15355 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15356 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15357 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15358 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15359 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15360 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15361 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15362 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15363 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15364 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15365
15366 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15367 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15368 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15369 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15370 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15371 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15372 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15373 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15374 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15375 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15376 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15377 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15378
15379 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15380 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15381 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15382 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15383 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15384 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15385 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15386 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15387 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15388 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15389 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15390 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15391
15392 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15393 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15394 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15395 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15396 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15397 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15398 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15399 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15400 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15401 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15402 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15403 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15404
15405 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15406 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15407 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15408 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15409 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15410 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15411 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15412 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15413 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15414 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15415 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15416 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15417
15418 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15419 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15420 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15421 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15422 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15423 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15424 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15425 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15426 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15427 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15428 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15429 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15430
15431 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15432 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15433 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15434 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15435 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15436 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15437 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15438 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15439 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15440 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15441 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15442 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15443
15444 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15445 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15446 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15447 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15448 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15449 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15450 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15451 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15452 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15453 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15454 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15455 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15456
15457 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15458 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15459 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15460 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15461 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15462 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15463 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15464 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15465 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15466 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15467 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15468 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15469
15470 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15471 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15472 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15473 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15474 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15475 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15476 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15477 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15478 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15479 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15480 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15481 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15482
15483 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15484 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15485 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15486 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15487 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15488 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15489 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15490 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15491 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15492 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15493 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15494 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15495
15496 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15497 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15498 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15499 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15500 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15501 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15502 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15503 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15504 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15505 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15506 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15507 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15508
15509 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15510 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15511 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15512 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15513 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15514 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15515 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15516 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15517 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15518 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15519 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15520 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15521
15522 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15523 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15524 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15525 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15526 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15527 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15528 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15529 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15530 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15531 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15532 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15533 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15534
15535 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15536 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15537 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15538 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15539 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15540 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15541 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15542 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15543 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15544 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15545 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15546 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15547
15548 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15549 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15550 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15551 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15552 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15553 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15554 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15555 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15556 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15557 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15558 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15559 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15560
15561 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15562 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15563 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15564 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15565 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15566 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15567 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15568 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15569 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15570 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15571 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15572 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15573
15574 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15575 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15576 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15577 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15578 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15579 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15580 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15581 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15582 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15583 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15584 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15585 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15586
15587 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15588 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15589 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15590 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15591 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15592 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15593 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15594 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15595 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15596 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15597 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15598 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15599
15600 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15601 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15602 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15603 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15604 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15605 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15606 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15607 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15608 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15609 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15610 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15611 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15612
15613 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15614 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15615 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15616 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15617 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15618 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15619 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15620 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15621 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15622 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15623 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15624 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15625
15626 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15627 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15628 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15629 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15630
15631 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15632 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15633 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15634 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15635 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15636 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15637 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15638 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15639 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15640 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15641 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15642 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15643
15644 /* The implementation of the FIX instruction is broken on some
15645 assemblers, in that it accepts a precision specifier as well as a
15646 rounding specifier, even though the combination is meaningless.
15647 For better compatibility we accept it as well, though of course
15648 it does not set any bits. */
15649 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15650 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15651 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15652 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15653 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15654 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15655 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15656 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15657 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15658 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15659 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15660 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15661 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
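/* A small illustration, assuming the usual FPA operand syntax: since "fixp"
   and "fixsp" above share opcode e100130, a line such as "fixsp r0, f1" is
   accepted and assembles to exactly the same encoding as "fixp r0, f1"; the
   precision letter is parsed but contributes no bits. */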
15662
15663 /* Instructions that were new with the real FPA; call them V2. */
15664 #undef ARM_VARIANT
15665 #define ARM_VARIANT &fpu_fpa_ext_v2
15666 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15667 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15668 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15669 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15670 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15671 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15672
15673 #undef ARM_VARIANT
15674 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15675 /* Moves and type conversions. */
15676 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15677 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15678 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15679 cCE(fmstat, ef1fa10, 0, (), noargs),
15680 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15681 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15682 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15683 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15684 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15685 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15686 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15687 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15688
15689 /* Memory operations. */
15690 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15691 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15692 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15693 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15694 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15695 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15696 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15697 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15698 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15699 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15700 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15701 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15702 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15703 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15704 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15705 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15706 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15707 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15708
15709 /* Monadic operations. */
15710 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15711 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15712 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15713
15714 /* Dyadic operations. */
15715 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15716 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15717 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15718 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15719 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15720 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15721 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15722 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15723 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15724
15725 /* Comparisons. */
15726 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15727 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15728 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15729 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15730
15731 #undef ARM_VARIANT
15732 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15733 /* Moves and type conversions. */
15734 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15735 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15736 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15737 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15738 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15739 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15740 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15741 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15742 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15743 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15744 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15745 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15746 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15747
15748 /* Memory operations. */
15749 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15750 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15751 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15752 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15753 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15754 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15755 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15756 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15757 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15758 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15759
15760 /* Monadic operations. */
15761 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15762 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15763 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15764
15765 /* Dyadic operations. */
15766 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15767 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15768 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15769 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15770 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15771 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15772 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15773 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15774 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15775
15776 /* Comparisons. */
15777 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15778 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15779 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15780 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15781
15782 #undef ARM_VARIANT
15783 #define ARM_VARIANT &fpu_vfp_ext_v2
15784 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15785 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15786 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15787 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15788
15789 /* Instructions which may belong to either the Neon or VFP instruction sets.
15790 Individual encoder functions perform additional architecture checks. */
15791 #undef ARM_VARIANT
15792 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15793 #undef THUMB_VARIANT
15794 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15795 /* These mnemonics are unique to VFP. */
15796 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15797 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15798 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15799 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15800 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15801 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15802 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15803 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15804 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15805 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15806
15807 /* Mnemonics shared by Neon and VFP. */
15808 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15809 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15810 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15811
15812 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15813 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
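/* For example, in standard unified syntax the single "vadd" entry above
   covers both "vadd.f32 s0, s1, s2" (which ends up as the VFP encoding) and
   "vadd.f32 q0, q1, q2" (which ends up as the Neon encoding); the encoder
   chooses between them from the register class of the operands. */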
15814
15815 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15816 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15817
15818 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15819 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15820 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15821 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15822 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15823 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15824 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15825 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15826
15827 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15828
15829 /* NOTE: All VMOV encoding is special-cased! */
15830 NCE(vmov, 0, 1, (VMOV), neon_mov),
15831 NCE(vmovq, 0, 1, (VMOV), neon_mov),
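/* For illustration, a few of the standard UAL forms this single entry must
   cope with: "vmov d0, d1" (register copy), "vmov r0, s1" / "vmov s1, r0"
   (core <-> single), "vmov r0, r1, d2" (core pair <-> double) and
   "vmov.i32 q0, #1" (Neon immediate); hence the special-casing in
   neon_mov. */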
15832
15833 #undef THUMB_VARIANT
15834 #define THUMB_VARIANT &fpu_neon_ext_v1
15835 #undef ARM_VARIANT
15836 #define ARM_VARIANT &fpu_neon_ext_v1
15837 /* Data processing with three registers of the same length. */
15838 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
15839 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15840 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15841 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15842 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15843 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15844 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15845 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15846 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15847 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15848 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15849 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15850 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15851 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15852 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15853 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15854 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15855 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15856 /* If not immediate, fall back to neon_dyadic_i64_su.
15857 shl_imm should accept I8 I16 I32 I64,
15858 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15859 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15860 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15861 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15862 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
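/* For example, "vshl.i32 d0, d1, #3" takes the immediate path through
   neon_shl_imm, whereas "vshl.s32 d0, d1, d2" has no immediate and falls
   back to the register form as described above. */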
15863 /* Logic ops, types optional & ignored. */
15864 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15865 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15866 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15867 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15868 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15869 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15870 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15871 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15872 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15873 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15874 /* Bitfield ops, untyped. */
15875 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15876 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15877 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15878 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15879 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15880 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15881 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15882 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15883 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15884 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15885 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15886 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15887 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15888 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15889 back to neon_dyadic_if_su. */
15890 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15891 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15892 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15893 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15894 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15895 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15896 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15897 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
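/* For example, "vcge.s32 d0, d1, #0" uses the compare-against-zero form,
   while "vcge.s32 d0, d1, d2" has no immediate and falls back to
   neon_dyadic_if_su as noted above. */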
15898 /* Comparison. Types I8 I16 I32 F32. */
15899 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15900 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15901 /* As above, D registers only. */
15902 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15903 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15904 /* Int and float variants, signedness unimportant. */
15905 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15906 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15907 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15908 /* Add/sub take types I8 I16 I32 I64 F32. */
15909 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15910 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15911 /* vtst takes sizes 8, 16, 32. */
15912 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15913 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15914 /* VMUL takes I8 I16 I32 F32 P8. */
15915 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15916 /* VQD{R}MULH takes S16 S32. */
15917 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15918 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15919 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15920 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15921 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15922 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15923 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15924 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15925 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15926 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15927 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15928 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15929 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15930 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15931 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15932 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15933
15934 /* Two address, int/float. Types S8 S16 S32 F32. */
15935 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15936 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15937
15938 /* Data processing with two registers and a shift amount. */
15939 /* Right shifts, and variants with rounding.
15940 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15941 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15942 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15943 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15944 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15945 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15946 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15947 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15948 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15949 /* Shift and insert. Sizes accepted 8 16 32 64. */
15950 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15951 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15952 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15953 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15954 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15955 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15956 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15957 /* Right shift immediate, saturating & narrowing, with rounding variants.
15958 Types accepted S16 S32 S64 U16 U32 U64. */
15959 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15960 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15961 /* As above, unsigned. Types accepted S16 S32 S64. */
15962 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15963 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15964 /* Right shift narrowing. Types accepted I16 I32 I64. */
15965 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15966 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15967 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15968 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15969 /* CVT with optional immediate for fixed-point variant. */
15970 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15971
15972 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15973 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15974
15975 /* Data processing, three registers of different lengths. */
15976 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15977 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15978 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15979 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15980 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15981 /* If not scalar, fall back to neon_dyadic_long.
15982 Vector types as above, scalar types S16 S32 U16 U32. */
15983 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15984 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15985 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15986 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15987 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15988 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15989 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15990 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15991 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15992 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15993 /* Saturating doubling multiplies. Types S16 S32. */
15994 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15995 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15996 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15997 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15998 S16 S32 U16 U32. */
15999 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
16000
16001 /* Extract. Size 8. */
16002 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
16003 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
16004
16005 /* Two registers, miscellaneous. */
16006 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
16007 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
16008 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
16009 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
16010 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
16011 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
16012 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
16013 /* Vector replicate. Sizes 8 16 32. */
16014 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
16015 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
16016 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
16017 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
16018 /* VMOVN. Types I16 I32 I64. */
16019 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
16020 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
16021 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
16022 /* VQMOVUN. Types S16 S32 S64. */
16023 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
16024 /* VZIP / VUZP. Sizes 8 16 32. */
16025 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
16026 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
16027 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
16028 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
16029 /* VQABS / VQNEG. Types S8 S16 S32. */
16030 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16031 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
16032 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16033 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
16034 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
16035 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
16036 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
16037 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
16038 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
16039 /* Reciprocal estimates. Types U32 F32. */
16040 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16041 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16042 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16043 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16044 /* VCLS. Types S8 S16 S32. */
16045 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16046 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16047 /* VCLZ. Types I8 I16 I32. */
16048 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16049 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16050 /* VCNT. Size 8. */
16051 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16052 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16053 /* Two address, untyped. */
16054 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16055 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16056 /* VTRN. Sizes 8 16 32. */
16057 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16058 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16059
16060 /* Table lookup. Size 8. */
16061 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16062 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16063
16064 #undef THUMB_VARIANT
16065 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16066 #undef ARM_VARIANT
16067 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16068 /* Neon element/structure load/store. */
16069 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16070 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16071 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16072 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16073 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16074 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16075 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16076 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16077
16078 #undef THUMB_VARIANT
16079 #define THUMB_VARIANT &fpu_vfp_ext_v3
16080 #undef ARM_VARIANT
16081 #define ARM_VARIANT &fpu_vfp_ext_v3
16082 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16083 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16084 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16085 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16086 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16087 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16088 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16089 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16090 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16091 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16092 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16093 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16094 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16095 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16096 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16097 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16098 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16099 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16100
16101 #undef THUMB_VARIANT
16102 #undef ARM_VARIANT
16103 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16104 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16105 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16106 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16107 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16108 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16109 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16110 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16111 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16112
16113 #undef ARM_VARIANT
16114 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16115 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16116 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16117 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16118 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16119 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16120 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16121 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16122 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16123 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16124 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16125 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16126 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16127 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16128 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16129 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16130 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16131 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16132 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16133 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16134 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16135 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16136 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16137 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16138 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16139 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16140 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16141 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16142 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16143 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16144 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16145 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16146 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16147 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16148 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16149 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16150 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16151 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16152 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16153 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16154 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16155 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16156 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16157 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16158 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16159 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16160 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16161 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16162 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16163 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16164 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16165 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16166 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16167 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16168 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16169 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16170 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16171 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16172 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16173 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16174 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16175 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16176 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16177 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16178 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16179 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16180 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16181 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16182 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16183 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16184 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16185 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16186 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16187 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16188 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16189 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16190 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16191 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16192 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16193 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16194 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16195 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16196 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16197 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16198 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16199 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16200 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16201 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16202 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16203 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16204 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16205 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16206 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16207 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16208 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16209 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16210 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16211 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16212 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16213 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16214 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16215 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16216 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16217 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16218 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16219 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16220 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16221 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16222 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16223 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16224 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16225 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16226 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16227 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16228 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16229 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16230 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16231 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16232 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16233 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16234 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16235 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16236 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16237 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16238 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16239 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16240 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16241 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16242 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16243 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16244 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16245 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16246 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16247 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16248 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16249 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16250 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16251 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16252 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16253 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16254 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16255 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16256 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16257 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16258 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16259 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16260 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16261 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16262 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16263 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16264 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16265 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16266 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16267 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16268 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16269 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16270 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16271 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16272 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16273 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16274 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16275 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16276 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16277
16278 #undef ARM_VARIANT
16279 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16280 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16281 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16282 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16283 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16284 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16285 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16286 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16287 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16288 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16289 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16290 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16291 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16292 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16293 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16294 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16295 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16296 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16297 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16298 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16299 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16300 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16301 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16302 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16303 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16304 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16305 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16306 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16307 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16308 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16309 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16310 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16311 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16312 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16313 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16314 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16315 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16316 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16317 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16318 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16319 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16320 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16321 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16322 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16323 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16324 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16325 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16326 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16327 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16328 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16329 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16330 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16331 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16332 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16333 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16334 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16335 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16336 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16337
16338 #undef ARM_VARIANT
16339 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16340 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16341 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16342 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16343 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16344 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16345 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16346 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16347 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16348 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16349 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16350 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16351 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16352 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16353 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16354 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16355 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16356 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16357 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16358 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16359 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16360 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16361 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16362 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16363 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16364 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16365 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16366 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16367 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16368 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16369 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16370 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16371 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16372 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16373 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16374 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16375 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16376 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16377 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16378 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16379 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16380 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16381 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16382 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16383 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16384 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16385 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16386 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16387 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16388 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16389 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16390 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16391 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16392 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16393 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16394 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16395 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16396 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16397 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16398 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16399 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16400 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16401 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16402 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16403 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16404 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16405 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16406 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16407 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16408 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16409 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16410 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16411 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16412 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16413 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16414 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16415 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16416 };
16417 #undef ARM_VARIANT
16418 #undef THUMB_VARIANT
16419 #undef TCE
16420 #undef TCM
16421 #undef TUE
16422 #undef TUF
16423 #undef TCC
16424 #undef cCE
16425 #undef cCL
16426 #undef C3E
16427 #undef CE
16428 #undef CM
16429 #undef UE
16430 #undef UF
16431 #undef UT
16432 #undef NUF
16433 #undef nUF
16434 #undef NCE
16435 #undef nCE
16436 #undef OPS0
16437 #undef OPS1
16438 #undef OPS2
16439 #undef OPS3
16440 #undef OPS4
16441 #undef OPS5
16442 #undef OPS6
16443 #undef do_0
16444 \f
16445 /* MD interface: bits in the object file. */
16446
16447 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16448 for use in the a.out file, and store them in the array pointed to by buf.
16449 This knows about the endianness of the target machine and does
16450 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
16451 2 (short) and 4 (long). Floating-point numbers are put out as a series of
16452 LITTLENUMS (shorts, here at least). */
16453
16454 void
16455 md_number_to_chars (char * buf, valueT val, int n)
16456 {
16457 if (target_big_endian)
16458 number_to_chars_bigendian (buf, val, n);
16459 else
16460 number_to_chars_littleendian (buf, val, n);
16461 }
16462
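/* Read an n-byte value back out of BUF, honouring the target
   endianness; the inverse of md_number_to_chars above. */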
16463 static valueT
16464 md_chars_to_number (char * buf, int n)
16465 {
16466 valueT result = 0;
16467 unsigned char * where = (unsigned char *) buf;
16468
16469 if (target_big_endian)
16470 {
16471 while (n--)
16472 {
16473 result <<= 8;
16474 result |= (*where++ & 255);
16475 }
16476 }
16477 else
16478 {
16479 while (n--)
16480 {
16481 result <<= 8;
16482 result |= (where[n] & 255);
16483 }
16484 }
16485
16486 return result;
16487 }
16488
16489 /* MD interface: Sections. */
16490
16491 /* Estimate the size of a frag before relaxing. Assume everything fits in
16492 2 bytes. */
16493
16494 int
16495 md_estimate_size_before_relax (fragS * fragp,
16496 segT segtype ATTRIBUTE_UNUSED)
16497 {
16498 fragp->fr_var = 2;
16499 return 2;
16500 }
16501
16502 /* Convert a machine dependent frag. */
16503
16504 void
16505 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16506 {
16507 unsigned long insn;
16508 unsigned long old_op;
16509 char *buf;
16510 expressionS exp;
16511 fixS *fixp;
16512 int reloc_type;
16513 int pc_rel;
16514 int opcode;
16515
16516 buf = fragp->fr_literal + fragp->fr_fix;
16517
16518 old_op = bfd_get_16 (abfd, buf);
16519 if (fragp->fr_symbol) {
16520 exp.X_op = O_symbol;
16521 exp.X_add_symbol = fragp->fr_symbol;
16522 } else {
16523 exp.X_op = O_constant;
16524 }
16525 exp.X_add_number = fragp->fr_offset;
16526 opcode = fragp->fr_subtype;
16527 switch (opcode)
16528 {
16529 case T_MNEM_ldr_pc:
16530 case T_MNEM_ldr_pc2:
16531 case T_MNEM_ldr_sp:
16532 case T_MNEM_str_sp:
16533 case T_MNEM_ldr:
16534 case T_MNEM_ldrb:
16535 case T_MNEM_ldrh:
16536 case T_MNEM_str:
16537 case T_MNEM_strb:
16538 case T_MNEM_strh:
16539 if (fragp->fr_var == 4)
16540 {
16541 insn = THUMB_OP32 (opcode);
16542 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16543 {
16544 insn |= (old_op & 0x700) << 4;
16545 }
16546 else
16547 {
16548 insn |= (old_op & 7) << 12;
16549 insn |= (old_op & 0x38) << 13;
16550 }
16551 insn |= 0x00000c00;
16552 put_thumb32_insn (buf, insn);
16553 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16554 }
16555 else
16556 {
16557 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16558 }
16559 pc_rel = (opcode == T_MNEM_ldr_pc2);
16560 break;
16561 case T_MNEM_adr:
16562 if (fragp->fr_var == 4)
16563 {
16564 insn = THUMB_OP32 (opcode);
16565 insn |= (old_op & 0xf0) << 4;
16566 put_thumb32_insn (buf, insn);
16567 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16568 }
16569 else
16570 {
16571 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16572 exp.X_add_number -= 4;
16573 }
16574 pc_rel = 1;
16575 break;
16576 case T_MNEM_mov:
16577 case T_MNEM_movs:
16578 case T_MNEM_cmp:
16579 case T_MNEM_cmn:
16580 if (fragp->fr_var == 4)
16581 {
16582 int r0off = (opcode == T_MNEM_mov
16583 || opcode == T_MNEM_movs) ? 0 : 8;
16584 insn = THUMB_OP32 (opcode);
16585 insn = (insn & 0xe1ffffff) | 0x10000000;
16586 insn |= (old_op & 0x700) << r0off;
16587 put_thumb32_insn (buf, insn);
16588 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16589 }
16590 else
16591 {
16592 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16593 }
16594 pc_rel = 0;
16595 break;
16596 case T_MNEM_b:
16597 if (fragp->fr_var == 4)
16598 {
16599 insn = THUMB_OP32 (opcode);
16600 put_thumb32_insn (buf, insn);
16601 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16602 }
16603 else
16604 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16605 pc_rel = 1;
16606 break;
16607 case T_MNEM_bcond:
16608 if (fragp->fr_var == 4)
16609 {
16610 insn = THUMB_OP32 (opcode);
16611 insn |= (old_op & 0xf00) << 14;
16612 put_thumb32_insn (buf, insn);
16613 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16614 }
16615 else
16616 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16617 pc_rel = 1;
16618 break;
16619 case T_MNEM_add_sp:
16620 case T_MNEM_add_pc:
16621 case T_MNEM_inc_sp:
16622 case T_MNEM_dec_sp:
16623 if (fragp->fr_var == 4)
16624 {
16625 /* ??? Choose between add and addw. */
16626 insn = THUMB_OP32 (opcode);
16627 insn |= (old_op & 0xf0) << 4;
16628 put_thumb32_insn (buf, insn);
16629 if (opcode == T_MNEM_add_pc)
16630 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16631 else
16632 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16633 }
16634 else
16635 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16636 pc_rel = 0;
16637 break;
16638
16639 case T_MNEM_addi:
16640 case T_MNEM_addis:
16641 case T_MNEM_subi:
16642 case T_MNEM_subis:
16643 if (fragp->fr_var == 4)
16644 {
16645 insn = THUMB_OP32 (opcode);
16646 insn |= (old_op & 0xf0) << 4;
16647 insn |= (old_op & 0xf) << 16;
16648 put_thumb32_insn (buf, insn);
16649 if (insn & (1 << 20))
16650 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16651 else
16652 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16653 }
16654 else
16655 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16656 pc_rel = 0;
16657 break;
16658 default:
16659 abort ();
16660 }
16661 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16662 reloc_type);
16663 fixp->fx_file = fragp->fr_file;
16664 fixp->fx_line = fragp->fr_line;
16665 fragp->fr_fix += fragp->fr_var;
16666 }
16667
16668 /* Return the size of a relaxable immediate operand instruction.
16669 SHIFT and SIZE specify the form of the allowable immediate. */
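/* For example, a 16-bit Thumb LDR uses SIZE == 5 and SHIFT == 2, so only
   word-aligned offsets in the range 0..124 fit the narrow encoding;
   anything else forces the 32-bit form. */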
16670 static int
16671 relax_immediate (fragS *fragp, int size, int shift)
16672 {
16673 offsetT offset;
16674 offsetT mask;
16675 offsetT low;
16676
16677 /* ??? Should be able to do better than this. */
16678 if (fragp->fr_symbol)
16679 return 4;
16680
16681 low = (1 << shift) - 1;
16682 mask = (1 << (shift + size)) - (1 << shift);
16683 offset = fragp->fr_offset;
16684 /* Force misaligned offsets to 32-bit variant. */
16685 if (offset & low)
16686 return 4;
16687 if (offset & ~mask)
16688 return 4;
16689 return 2;
16690 }
16691
16692 /* Get the address of a symbol during relaxation. */
16693 static addressT
16694 relaxed_symbol_addr (fragS *fragp, long stretch)
16695 {
16696 fragS *sym_frag;
16697 addressT addr;
16698 symbolS *sym;
16699
16700 sym = fragp->fr_symbol;
16701 sym_frag = symbol_get_frag (sym);
16702 know (S_GET_SEGMENT (sym) != absolute_section
16703 || sym_frag == &zero_address_frag);
16704 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16705
16706 /* If frag has yet to be reached on this pass, assume it will
16707 move by STRETCH just as we did. If this is not so, it will
16708 be because some frag in between has grown, and that will force
16709 another pass. */
16710
16711 if (stretch != 0
16712 && sym_frag->relax_marker != fragp->relax_marker)
16713 addr += stretch;
16714
16715 return addr;
16716 }
16717
16718 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16719 load. */
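/* Thumb PC-relative calculations use Align(PC, 4), i.e. the address of
   the instruction plus 4 rounded down to a word boundary; hence the
   "(addr + 4) & ~3" base below and the 0..1020 range of the word-scaled
   8-bit offset. */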
16720 static int
16721 relax_adr (fragS *fragp, asection *sec, long stretch)
16722 {
16723 addressT addr;
16724 offsetT val;
16725
16726 /* Assume worst case for symbols not known to be in the same section. */
16727 if (!S_IS_DEFINED (fragp->fr_symbol)
16728 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16729 return 4;
16730
16731 val = relaxed_symbol_addr (fragp, stretch);
16732 addr = fragp->fr_address + fragp->fr_fix;
16733 addr = (addr + 4) & ~3;
16734 /* Force misaligned targets to 32-bit variant. */
16735 if (val & 3)
16736 return 4;
16737 val -= addr;
16738 if (val < 0 || val > 1020)
16739 return 4;
16740 return 2;
16741 }
16742
16743 /* Return the size of a relaxable add/sub immediate instruction. */
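/* The 16-bit placeholder written for a relaxable add/sub records the
   destination and source registers in its two low nibbles; when they
   name the same register the 8-bit immediate form is usable, otherwise
   only the 3-bit immediate form is. */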
16744 static int
16745 relax_addsub (fragS *fragp, asection *sec)
16746 {
16747 char *buf;
16748 int op;
16749
16750 buf = fragp->fr_literal + fragp->fr_fix;
16751 op = bfd_get_16 (sec->owner, buf);
16752 if ((op & 0xf) == ((op >> 4) & 0xf))
16753 return relax_immediate (fragp, 8, 0);
16754 else
16755 return relax_immediate (fragp, 3, 0);
16756 }
16757
16758
16759 /* Return the size of a relaxable branch instruction. BITS is the
16760 size of the offset field in the narrow instruction. */
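/* BITS is 11 for an unconditional Thumb branch (a reach of roughly
   +/-2KB) and 8 for a conditional branch (roughly +/-256 bytes); see
   arm_relax_frag below. */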
16761
16762 static int
16763 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16764 {
16765 addressT addr;
16766 offsetT val;
16767 offsetT limit;
16768
16769 /* Assume worst case for symbols not known to be in the same section. */
16770 if (!S_IS_DEFINED (fragp->fr_symbol)
16771 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16772 return 4;
16773
16774 val = relaxed_symbol_addr (fragp, stretch);
16775 addr = fragp->fr_address + fragp->fr_fix + 4;
16776 val -= addr;
16777
16778 /* The offset field is a signed count of halfwords, so the reach in bytes is +/- (1 << BITS). */
16779 limit = 1 << bits;
16780 if (val >= limit || val < -limit)
16781 return 4;
16782 return 2;
16783 }
16784
16785
16786 /* Relax a machine dependent frag. This returns the amount by which
16787 the current size of the frag should change. */
16788
16789 int
16790 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
16791 {
16792 int oldsize;
16793 int newsize;
16794
16795 oldsize = fragp->fr_var;
16796 switch (fragp->fr_subtype)
16797 {
16798 case T_MNEM_ldr_pc2:
16799 newsize = relax_adr (fragp, sec, stretch);
16800 break;
16801 case T_MNEM_ldr_pc:
16802 case T_MNEM_ldr_sp:
16803 case T_MNEM_str_sp:
16804 newsize = relax_immediate (fragp, 8, 2);
16805 break;
16806 case T_MNEM_ldr:
16807 case T_MNEM_str:
16808 newsize = relax_immediate (fragp, 5, 2);
16809 break;
16810 case T_MNEM_ldrh:
16811 case T_MNEM_strh:
16812 newsize = relax_immediate (fragp, 5, 1);
16813 break;
16814 case T_MNEM_ldrb:
16815 case T_MNEM_strb:
16816 newsize = relax_immediate (fragp, 5, 0);
16817 break;
16818 case T_MNEM_adr:
16819 newsize = relax_adr (fragp, sec, stretch);
16820 break;
16821 case T_MNEM_mov:
16822 case T_MNEM_movs:
16823 case T_MNEM_cmp:
16824 case T_MNEM_cmn:
16825 newsize = relax_immediate (fragp, 8, 0);
16826 break;
16827 case T_MNEM_b:
16828 newsize = relax_branch (fragp, sec, 11, stretch);
16829 break;
16830 case T_MNEM_bcond:
16831 newsize = relax_branch (fragp, sec, 8, stretch);
16832 break;
16833 case T_MNEM_add_sp:
16834 case T_MNEM_add_pc:
16835 newsize = relax_immediate (fragp, 8, 2);
16836 break;
16837 case T_MNEM_inc_sp:
16838 case T_MNEM_dec_sp:
16839 newsize = relax_immediate (fragp, 7, 2);
16840 break;
16841 case T_MNEM_addi:
16842 case T_MNEM_addis:
16843 case T_MNEM_subi:
16844 case T_MNEM_subis:
16845 newsize = relax_addsub (fragp, sec);
16846 break;
16847 default:
16848 abort ();
16849 }
16850
16851 fragp->fr_var = newsize;
16852 /* Freeze wide instructions that are at or before the same location as
16853 in the previous pass. This avoids infinite loops.
16854 Don't freeze them unconditionally because targets may be artificially
16855 misaligned by the expansion of preceding frags. */
16856 if (stretch <= 0 && newsize > 2)
16857 {
16858 md_convert_frag (sec->owner, sec, fragp);
16859 frag_wane (fragp);
16860 }
16861
16862 return newsize - oldsize;
16863 }
16864
16865 /* Round up a section size to the appropriate boundary. */
16866
16867 valueT
16868 md_section_align (segT segment ATTRIBUTE_UNUSED,
16869 valueT size)
16870 {
16871 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16872 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16873 {
16874 /* For a.out, force the section size to be aligned. If we don't do
16875 this, BFD will align it for us, but it will not write out the
16876 final bytes of the section. This may be a bug in BFD, but it is
16877 easier to fix it here since that is how the other a.out targets
16878 work. */
16879 int align;
16880
16881 align = bfd_get_section_alignment (stdoutput, segment);
16882 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16883 }
16884 #endif
16885
16886 return size;
16887 }
16888
16889 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16890 of an rs_align_code fragment. */
16891
16892 void
16893 arm_handle_align (fragS * fragP)
16894 {
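/* Little-endian byte sequences for the canonical no-ops: 0xe1a00000
   ("mov r0, r0") for ARM and 0x46c0 ("mov r8, r8") for Thumb. The
   *_bigend_* variants are the same encodings with the bytes reversed. */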
16895 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16896 static char const thumb_noop[2] = { 0xc0, 0x46 };
16897 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16898 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16899
16900 int bytes, fix, noop_size;
16901 char * p;
16902 const char * noop;
16903
16904 if (fragP->fr_type != rs_align_code)
16905 return;
16906
16907 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16908 p = fragP->fr_literal + fragP->fr_fix;
16909 fix = 0;
16910
16911 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16912 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16913
16914 if (fragP->tc_frag_data)
16915 {
16916 if (target_big_endian)
16917 noop = thumb_bigend_noop;
16918 else
16919 noop = thumb_noop;
16920 noop_size = sizeof (thumb_noop);
16921 }
16922 else
16923 {
16924 if (target_big_endian)
16925 noop = arm_bigend_noop;
16926 else
16927 noop = arm_noop;
16928 noop_size = sizeof (arm_noop);
16929 }
16930
16931 if (bytes & (noop_size - 1))
16932 {
16933 fix = bytes & (noop_size - 1);
16934 memset (p, 0, fix);
16935 p += fix;
16936 bytes -= fix;
16937 }
16938
16939 while (bytes >= noop_size)
16940 {
16941 memcpy (p, noop, noop_size);
16942 p += noop_size;
16943 bytes -= noop_size;
16944 fix += noop_size;
16945 }
16946
16947 fragP->fr_fix += fix;
16948 fragP->fr_var = noop_size;
16949 }
16950
16951 /* Called from md_do_align. Used to create an alignment
16952 frag in a code section. */
16953
16954 void
16955 arm_frag_align_code (int n, int max)
16956 {
16957 char * p;
16958
16959 /* We assume that there will never be a requirement
16960 to support alignments greater than 32 bytes. */
16961 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16962 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16963
16964 p = frag_var (rs_align_code,
16965 MAX_MEM_FOR_RS_ALIGN_CODE,
16966 1,
16967 (relax_substateT) max,
16968 (symbolS *) NULL,
16969 (offsetT) n,
16970 (char *) NULL);
16971 *p = 0;
16972 }
16973
16974 /* Perform target specific initialisation of a frag. */
16975
16976 void
16977 arm_init_frag (fragS * fragP)
16978 {
16979 /* Record whether this frag is in an ARM or a THUMB area. */
16980 fragP->tc_frag_data = thumb_mode;
16981 }
16982
16983 #ifdef OBJ_ELF
16984 /* When we change sections we need to issue a new mapping symbol. */
16985
16986 void
16987 arm_elf_change_section (void)
16988 {
16989 flagword flags;
16990 segment_info_type *seginfo;
16991
16992 /* Link an unlinked unwind index table section to the .text section. */
16993 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16994 && elf_linked_to_section (now_seg) == NULL)
16995 elf_linked_to_section (now_seg) = text_section;
16996
16997 if (!SEG_NORMAL (now_seg))
16998 return;
16999
17000 flags = bfd_get_section_flags (stdoutput, now_seg);
17001
17002 /* We can ignore sections that only contain debug info. */
17003 if ((flags & SEC_ALLOC) == 0)
17004 return;
17005
17006 seginfo = seg_info (now_seg);
17007 mapstate = seginfo->tc_segment_info_data.mapstate;
17008 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
17009 }
17010
17011 int
17012 arm_elf_section_type (const char * str, size_t len)
17013 {
17014 if (len == 5 && strncmp (str, "exidx", 5) == 0)
17015 return SHT_ARM_EXIDX;
17016
17017 return -1;
17018 }
17019 \f
17020 /* Code to deal with unwinding tables. */
17021
17022 static void add_unwind_adjustsp (offsetT);
17023
17024 /* Generate any deferred unwind frame offset. */
17025
17026 static void
17027 flush_pending_unwind (void)
17028 {
17029 offsetT offset;
17030
17031 offset = unwind.pending_offset;
17032 unwind.pending_offset = 0;
17033 if (offset != 0)
17034 add_unwind_adjustsp (offset);
17035 }
17036
17037 /* Add an opcode to this list for this function. Two-byte opcodes should
17038 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17039 order. */
17040
17041 static void
17042 add_unwind_opcode (valueT op, int length)
17043 {
17044 /* Add any deferred stack adjustment. */
17045 if (unwind.pending_offset)
17046 flush_pending_unwind ();
17047
17048 unwind.sp_restored = 0;
17049
17050 if (unwind.opcode_count + length > unwind.opcode_alloc)
17051 {
17052 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17053 if (unwind.opcodes)
17054 unwind.opcodes = xrealloc (unwind.opcodes,
17055 unwind.opcode_alloc);
17056 else
17057 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17058 }
17059 while (length > 0)
17060 {
17061 length--;
17062 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17063 op >>= 8;
17064 unwind.opcode_count++;
17065 }
17066 }
17067
17068 /* Add unwind opcodes to adjust the stack pointer. */
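/* The ARM EHABI stack-adjustment encodings produced here are:
   0x00-0x3f      vsp = vsp + (imm6 << 2) + 4
   0x40-0x7f      vsp = vsp - (imm6 << 2) - 4
   0xb2 uleb128   vsp = vsp + 0x204 + (uleb128 << 2). */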
17069
17070 static void
17071 add_unwind_adjustsp (offsetT offset)
17072 {
17073 valueT op;
17074
17075 if (offset > 0x200)
17076 {
17077 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17078 char bytes[5];
17079 int n;
17080 valueT o;
17081
17082 /* Long form: 0xb2, uleb128. */
17083 /* This might not fit in a word so add the individual bytes,
17084 remembering the list is built in reverse order. */
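/* For example, an adjustment of 0x300 bytes becomes 0xb2 0x3f,
   since (0x300 - 0x204) >> 2 == 0x3f. */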
17085 o = (valueT) ((offset - 0x204) >> 2);
17086 if (o == 0)
17087 add_unwind_opcode (0, 1);
17088
17089 /* Calculate the uleb128 encoding of the offset. */
17090 n = 0;
17091 while (o)
17092 {
17093 bytes[n] = o & 0x7f;
17094 o >>= 7;
17095 if (o)
17096 bytes[n] |= 0x80;
17097 n++;
17098 }
17099 /* Add the insn. */
17100 for (; n; n--)
17101 add_unwind_opcode (bytes[n - 1], 1);
17102 add_unwind_opcode (0xb2, 1);
17103 }
17104 else if (offset > 0x100)
17105 {
17106 /* Two short opcodes. */
17107 add_unwind_opcode (0x3f, 1);
17108 op = (offset - 0x104) >> 2;
17109 add_unwind_opcode (op, 1);
17110 }
17111 else if (offset > 0)
17112 {
17113 /* Short opcode. */
17114 op = (offset - 4) >> 2;
17115 add_unwind_opcode (op, 1);
17116 }
17117 else if (offset < 0)
17118 {
17119 offset = -offset;
17120 while (offset > 0x100)
17121 {
17122 add_unwind_opcode (0x7f, 1);
17123 offset -= 0x100;
17124 }
17125 op = ((offset - 4) >> 2) | 0x40;
17126 add_unwind_opcode (op, 1);
17127 }
17128 }
17129
17130 /* Finish the list of unwind opcodes for this function. */
17131 static void
17132 finish_unwind_opcodes (void)
17133 {
17134 valueT op;
17135
17136 if (unwind.fp_used)
17137 {
17138 /* Adjust sp as necessary. */
17139 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17140 flush_pending_unwind ();
17141
17142 /* After restoring sp from the frame pointer. */
17143 op = 0x90 | unwind.fp_reg;
17144 add_unwind_opcode (op, 1);
17145 }
17146 else
17147 flush_pending_unwind ();
17148 }
17149
17150
17151 /* Start an exception table entry. If idx is nonzero this is an index table
17152 entry. */
17153
17154 static void
17155 start_unwind_section (const segT text_seg, int idx)
17156 {
17157 const char * text_name;
17158 const char * prefix;
17159 const char * prefix_once;
17160 const char * group_name;
17161 size_t prefix_len;
17162 size_t text_len;
17163 char * sec_name;
17164 size_t sec_name_len;
17165 int type;
17166 int flags;
17167 int linkonce;
17168
17169 if (idx)
17170 {
17171 prefix = ELF_STRING_ARM_unwind;
17172 prefix_once = ELF_STRING_ARM_unwind_once;
17173 type = SHT_ARM_EXIDX;
17174 }
17175 else
17176 {
17177 prefix = ELF_STRING_ARM_unwind_info;
17178 prefix_once = ELF_STRING_ARM_unwind_info_once;
17179 type = SHT_PROGBITS;
17180 }
17181
17182 text_name = segment_name (text_seg);
17183 if (streq (text_name, ".text"))
17184 text_name = "";
17185
17186 if (strncmp (text_name, ".gnu.linkonce.t.",
17187 strlen (".gnu.linkonce.t.")) == 0)
17188 {
17189 prefix = prefix_once;
17190 text_name += strlen (".gnu.linkonce.t.");
17191 }
17192
17193 prefix_len = strlen (prefix);
17194 text_len = strlen (text_name);
17195 sec_name_len = prefix_len + text_len;
17196 sec_name = xmalloc (sec_name_len + 1);
17197 memcpy (sec_name, prefix, prefix_len);
17198 memcpy (sec_name + prefix_len, text_name, text_len);
17199 sec_name[prefix_len + text_len] = '\0';
17200
17201 flags = SHF_ALLOC;
17202 linkonce = 0;
17203 group_name = 0;
17204
17205 /* Handle COMDAT group. */
17206 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17207 {
17208 group_name = elf_group_name (text_seg);
17209 if (group_name == NULL)
17210 {
17211 as_bad ("Group section `%s' has no group signature",
17212 segment_name (text_seg));
17213 ignore_rest_of_line ();
17214 return;
17215 }
17216 flags |= SHF_GROUP;
17217 linkonce = 1;
17218 }
17219
17220 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17221
17222 /* Set the section link for index tables. */
17223 if (idx)
17224 elf_linked_to_section (now_seg) = text_seg;
17225 }
17226
17227
17228 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17229 personality routine data. Returns zero, or the index table value for
17230 an inline entry. */
17231
17232 static valueT
17233 create_unwind_entry (int have_data)
17234 {
17235 int size;
17236 addressT where;
17237 char *ptr;
17238 /* The current word of data. */
17239 valueT data;
17240 /* The number of bytes left in this word. */
17241 int n;
17242
17243 finish_unwind_opcodes ();
17244
17245 /* Remember the current text section. */
17246 unwind.saved_seg = now_seg;
17247 unwind.saved_subseg = now_subseg;
17248
17249 start_unwind_section (now_seg, 0);
17250
17251 if (unwind.personality_routine == NULL)
17252 {
17253 if (unwind.personality_index == -2)
17254 {
17255 if (have_data)
17256 as_bad (_("handlerdata in cantunwind frame"));
17257 return 1; /* EXIDX_CANTUNWIND. */
17258 }
17259
17260 /* Use a default personality routine if none is specified. */
17261 if (unwind.personality_index == -1)
17262 {
17263 if (unwind.opcode_count > 3)
17264 unwind.personality_index = 1;
17265 else
17266 unwind.personality_index = 0;
17267 }
17268
17269 /* Space for the personality routine entry. */
17270 if (unwind.personality_index == 0)
17271 {
17272 if (unwind.opcode_count > 3)
17273 as_bad (_("too many unwind opcodes for personality routine 0"));
17274
17275 if (!have_data)
17276 {
17277 /* All the data is inline in the index table. */
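/* Build a compact index table entry: bit 31 set, personality routine
   index 0 in the rest of the top byte, and up to three unwind opcode
   bytes in the low 24 bits, padded with 0xb0 ("finish") opcodes. */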
17278 data = 0x80;
17279 n = 3;
17280 while (unwind.opcode_count > 0)
17281 {
17282 unwind.opcode_count--;
17283 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17284 n--;
17285 }
17286
17287 /* Pad with "finish" opcodes. */
17288 while (n--)
17289 data = (data << 8) | 0xb0;
17290
17291 return data;
17292 }
17293 size = 0;
17294 }
17295 else
17296 /* We get two opcodes "free" in the first word. */
17297 size = unwind.opcode_count - 2;
17298 }
17299 else
17300 /* An extra byte is required for the opcode count. */
17301 size = unwind.opcode_count + 1;
17302
17303 size = (size + 3) >> 2;
17304 if (size > 0xff)
17305 as_bad (_("too many unwind opcodes"));
17306
17307 frag_align (2, 0, 0);
17308 record_alignment (now_seg, 2);
17309 unwind.table_entry = expr_build_dot ();
17310
17311 /* Allocate the table entry. */
17312 ptr = frag_more ((size << 2) + 4);
17313 where = frag_now_fix () - ((size << 2) + 4);
17314
17315 switch (unwind.personality_index)
17316 {
17317 case -1:
17318 /* ??? Should this be a PLT generating relocation? */
17319 /* Custom personality routine. */
17320 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17321 BFD_RELOC_ARM_PREL31);
17322
17323 where += 4;
17324 ptr += 4;
17325
17326 /* Set the first byte to the number of additional words. */
17327 data = size - 1;
17328 n = 3;
17329 break;
17330
17331 /* ABI defined personality routines. */
17332 case 0:
17333 /* Three opcode bytes are packed into the first word. */
17334 data = 0x80;
17335 n = 3;
17336 break;
17337
17338 case 1:
17339 case 2:
17340 /* The size and first two opcode bytes go in the first word. */
17341 data = ((0x80 + unwind.personality_index) << 8) | size;
17342 n = 2;
17343 break;
17344
17345 default:
17346 /* Should never happen. */
17347 abort ();
17348 }
17349
17350 /* Pack the opcodes into words (MSB first), reversing the list at the same
17351 time. */
17352 while (unwind.opcode_count > 0)
17353 {
17354 if (n == 0)
17355 {
17356 md_number_to_chars (ptr, data, 4);
17357 ptr += 4;
17358 n = 4;
17359 data = 0;
17360 }
17361 unwind.opcode_count--;
17362 n--;
17363 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17364 }
17365
17366 /* Finish off the last word. */
17367 if (n < 4)
17368 {
17369 /* Pad with "finish" opcodes. */
17370 while (n--)
17371 data = (data << 8) | 0xb0;
17372
17373 md_number_to_chars (ptr, data, 4);
17374 }
17375
17376 if (!have_data)
17377 {
17378 /* Add an empty descriptor if there is no user-specified data. */
17379 ptr = frag_more (4);
17380 md_number_to_chars (ptr, 0, 4);
17381 }
17382
17383 return 0;
17384 }
17385
17386
17387 /* Initialize the DWARF-2 unwind information for this procedure. */
17388
17389 void
17390 tc_arm_frame_initial_instructions (void)
17391 {
17392 cfi_add_CFA_def_cfa (REG_SP, 0);
17393 }
17394 #endif /* OBJ_ELF */
17395
17396 /* Convert REGNAME to a DWARF-2 register number. */
17397
17398 int
17399 tc_arm_regname_to_dw2regnum (char *regname)
17400 {
17401 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17402
17403 if (reg == FAIL)
17404 return -1;
17405
17406 return reg;
17407 }
17408
17409 #ifdef TE_PE
17410 void
17411 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17412 {
17413 expressionS expr;
17414
17415 expr.X_op = O_secrel;
17416 expr.X_add_symbol = symbol;
17417 expr.X_add_number = 0;
17418 emit_expr (&expr, size);
17419 }
17420 #endif
17421
17422 /* MD interface: Symbol and relocation handling. */
17423
17424 /* Return the address within the segment that a PC-relative fixup is
17425 relative to. For ARM, PC-relative fixups applied to instructions
17426 are generally relative to the location of the fixup plus 8 bytes.
17427 Thumb branches are offset by 4, and Thumb loads relative to PC
17428 require special handling. */
17429
17430 long
17431 md_pcrel_from_section (fixS * fixP, segT seg)
17432 {
17433 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17434
17435 /* If this is pc-relative and we are going to emit a relocation
17436 then we just want to put out any pipeline compensation that the linker
17437 will need. Otherwise we want to use the calculated base.
17438 For WinCE we skip the bias for externals as well, since this
17439 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17440 if (fixP->fx_pcrel
17441 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17442 || (arm_force_relocation (fixP)
17443 #ifdef TE_WINCE
17444 && !S_IS_EXTERNAL (fixP->fx_addsy)
17445 #endif
17446 )))
17447 base = 0;
17448
17449 switch (fixP->fx_r_type)
17450 {
17451 /* PC relative addressing on the Thumb is slightly odd as the
17452 bottom two bits of the PC are forced to zero for the
17453 calculation. This happens *after* application of the
17454 pipeline offset. However, Thumb adrl already adjusts for
17455 this, so we need not do it again. */
17456 case BFD_RELOC_ARM_THUMB_ADD:
17457 return base & ~3;
17458
17459 case BFD_RELOC_ARM_THUMB_OFFSET:
17460 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17461 case BFD_RELOC_ARM_T32_ADD_PC12:
17462 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17463 return (base + 4) & ~3;
17464
17465 /* Thumb branches are simply offset by +4. */
17466 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17467 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17468 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17469 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17470 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17471 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17472 case BFD_RELOC_THUMB_PCREL_BLX:
17473 return base + 4;
17474
17475 /* ARM mode branches are offset by +8. However, the Windows CE
17476 loader expects the relocation not to take this into account. */
17477 case BFD_RELOC_ARM_PCREL_BRANCH:
17478 case BFD_RELOC_ARM_PCREL_CALL:
17479 case BFD_RELOC_ARM_PCREL_JUMP:
17480 case BFD_RELOC_ARM_PCREL_BLX:
17481 case BFD_RELOC_ARM_PLT32:
17482 #ifdef TE_WINCE
17483 /* When handling fixups immediately, because we have already
17484 discovered the value of a symbol or the address of the frag involved,
17485 we must account for the offset of +8, as the OS loader will never see the reloc;
17486 see fixup_segment() in write.c.
17487 The S_IS_EXTERNAL test handles the case of global symbols.
17488 Those need the calculated base, not just the pipeline compensation the linker will need. */
17489 if (fixP->fx_pcrel
17490 && fixP->fx_addsy != NULL
17491 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17492 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17493 return base + 8;
17494 return base;
17495 #else
17496 return base + 8;
17497 #endif
17498
17499 /* ARM mode loads relative to PC are also offset by +8. Unlike
17500 branches, the Windows CE loader *does* expect the relocation
17501 to take this into account. */
17502 case BFD_RELOC_ARM_OFFSET_IMM:
17503 case BFD_RELOC_ARM_OFFSET_IMM8:
17504 case BFD_RELOC_ARM_HWLITERAL:
17505 case BFD_RELOC_ARM_LITERAL:
17506 case BFD_RELOC_ARM_CP_OFF_IMM:
17507 return base + 8;
17508
17509
17510 /* Other PC-relative relocations are un-offset. */
17511 default:
17512 return base;
17513 }
17514 }
17515
17516 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
17517 Otherwise we have no need to default values of symbols. */
17518
17519 symbolS *
17520 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17521 {
17522 #ifdef OBJ_ELF
17523 if (name[0] == '_' && name[1] == 'G'
17524 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17525 {
17526 if (!GOT_symbol)
17527 {
17528 if (symbol_find (name))
17529 as_bad ("GOT already in the symbol table");
17530
17531 GOT_symbol = symbol_new (name, undefined_section,
17532 (valueT) 0, & zero_address_frag);
17533 }
17534
17535 return GOT_symbol;
17536 }
17537 #endif
17538
17539 return 0;
17540 }
17541
17542 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17543 computed as two separate immediate values, added together. We
17544 already know that this value cannot be computed by just one ARM
17545 instruction. */
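/* For example, 0x10004 cannot be encoded as a single rotated 8-bit
   immediate, but splits into a low part of 0x4 and a high part of
   0x10000, each of which can. */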
17546
17547 static unsigned int
17548 validate_immediate_twopart (unsigned int val,
17549 unsigned int * highpart)
17550 {
17551 unsigned int a;
17552 unsigned int i;
17553
17554 for (i = 0; i < 32; i += 2)
17555 if (((a = rotate_left (val, i)) & 0xff) != 0)
17556 {
17557 if (a & 0xff00)
17558 {
17559 if (a & ~ 0xffff)
17560 continue;
17561 * highpart = (a >> 8) | ((i + 24) << 7);
17562 }
17563 else if (a & 0xff0000)
17564 {
17565 if (a & 0xff000000)
17566 continue;
17567 * highpart = (a >> 16) | ((i + 16) << 7);
17568 }
17569 else
17570 {
17571 assert (a & 0xff000000);
17572 * highpart = (a >> 24) | ((i + 8) << 7);
17573 }
17574
17575 return (a & 0xff) | (i << 7);
17576 }
17577
17578 return FAIL;
17579 }
17580
17581 static int
17582 validate_offset_imm (unsigned int val, int hwse)
17583 {
17584 if ((hwse && val > 255) || val > 4095)
17585 return FAIL;
17586 return val;
17587 }
17588
17589 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17590 negative immediate constant by altering the instruction. A bit of
17591 a hack really.
17592 MOV <-> MVN
17593 AND <-> BIC
17594 ADC <-> SBC
17595 by inverting the second operand, and
17596 ADD <-> SUB
17597 CMP <-> CMN
17598 by negating the second operand. */
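/* For example, an ADD whose immediate resolves to -1 after the fixup
   has no valid encoding, but becomes a SUB with an immediate of 1. */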
17599
17600 static int
17601 negate_data_op (unsigned long * instruction,
17602 unsigned long value)
17603 {
17604 int op, new_inst;
17605 unsigned long negated, inverted;
17606
17607 negated = encode_arm_immediate (-value);
17608 inverted = encode_arm_immediate (~value);
17609
17610 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17611 switch (op)
17612 {
17613 /* First negates. */
17614 case OPCODE_SUB: /* ADD <-> SUB */
17615 new_inst = OPCODE_ADD;
17616 value = negated;
17617 break;
17618
17619 case OPCODE_ADD:
17620 new_inst = OPCODE_SUB;
17621 value = negated;
17622 break;
17623
17624 case OPCODE_CMP: /* CMP <-> CMN */
17625 new_inst = OPCODE_CMN;
17626 value = negated;
17627 break;
17628
17629 case OPCODE_CMN:
17630 new_inst = OPCODE_CMP;
17631 value = negated;
17632 break;
17633
17634 /* Now Inverted ops. */
17635 case OPCODE_MOV: /* MOV <-> MVN */
17636 new_inst = OPCODE_MVN;
17637 value = inverted;
17638 break;
17639
17640 case OPCODE_MVN:
17641 new_inst = OPCODE_MOV;
17642 value = inverted;
17643 break;
17644
17645 case OPCODE_AND: /* AND <-> BIC */
17646 new_inst = OPCODE_BIC;
17647 value = inverted;
17648 break;
17649
17650 case OPCODE_BIC:
17651 new_inst = OPCODE_AND;
17652 value = inverted;
17653 break;
17654
17655 case OPCODE_ADC: /* ADC <-> SBC */
17656 new_inst = OPCODE_SBC;
17657 value = inverted;
17658 break;
17659
17660 case OPCODE_SBC:
17661 new_inst = OPCODE_ADC;
17662 value = inverted;
17663 break;
17664
17665 /* We cannot do anything. */
17666 default:
17667 return FAIL;
17668 }
17669
17670 if (value == (unsigned) FAIL)
17671 return FAIL;
17672
17673 *instruction &= OPCODE_MASK;
17674 *instruction |= new_inst << DATA_OP_SHIFT;
17675 return value;
17676 }
17677
17678 /* Like negate_data_op, but for Thumb-2. */
17679
17680 static unsigned int
17681 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17682 {
17683 int op, new_inst;
17684 int rd;
17685 unsigned int negated, inverted;
17686
17687 negated = encode_thumb32_immediate (-value);
17688 inverted = encode_thumb32_immediate (~value);
17689
17690 rd = (*instruction >> 8) & 0xf;
17691 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17692 switch (op)
17693 {
17694 /* ADD <-> SUB. Includes CMP <-> CMN. */
17695 case T2_OPCODE_SUB:
17696 new_inst = T2_OPCODE_ADD;
17697 value = negated;
17698 break;
17699
17700 case T2_OPCODE_ADD:
17701 new_inst = T2_OPCODE_SUB;
17702 value = negated;
17703 break;
17704
17705 /* ORR <-> ORN. Includes MOV <-> MVN. */
17706 case T2_OPCODE_ORR:
17707 new_inst = T2_OPCODE_ORN;
17708 value = inverted;
17709 break;
17710
17711 case T2_OPCODE_ORN:
17712 new_inst = T2_OPCODE_ORR;
17713 value = inverted;
17714 break;
17715
17716 /* AND <-> BIC. TST has no inverted equivalent. */
17717 case T2_OPCODE_AND:
17718 new_inst = T2_OPCODE_BIC;
17719 if (rd == 15)
17720 value = FAIL;
17721 else
17722 value = inverted;
17723 break;
17724
17725 case T2_OPCODE_BIC:
17726 new_inst = T2_OPCODE_AND;
17727 value = inverted;
17728 break;
17729
17730 /* ADC <-> SBC */
17731 case T2_OPCODE_ADC:
17732 new_inst = T2_OPCODE_SBC;
17733 value = inverted;
17734 break;
17735
17736 case T2_OPCODE_SBC:
17737 new_inst = T2_OPCODE_ADC;
17738 value = inverted;
17739 break;
17740
17741 /* We cannot do anything. */
17742 default:
17743 return FAIL;
17744 }
17745
17746 if (value == (unsigned int) FAIL)
17747 return FAIL;
17748
17749 *instruction &= T2_OPCODE_MASK;
17750 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17751 return value;
17752 }
17753
17754 /* Read a 32-bit thumb instruction from buf. */
17755 static unsigned long
17756 get_thumb32_insn (char * buf)
17757 {
17758 unsigned long insn;
17759 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17760 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17761
17762 return insn;
17763 }
17764
17765
17766 /* We usually want to set the low bit on the address of thumb function
17767 symbols. In particular .word foo - . should have the low bit set.
17768 Generic code tries to fold the difference of two symbols to
17769 a constant. Prevent this and force a relocation when the first symbol
17770 is a thumb function. */
17771 int
17772 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17773 {
17774 if (op == O_subtract
17775 && l->X_op == O_symbol
17776 && r->X_op == O_symbol
17777 && THUMB_IS_FUNC (l->X_add_symbol))
17778 {
17779 l->X_op = O_subtract;
17780 l->X_op_symbol = r->X_add_symbol;
17781 l->X_add_number -= r->X_add_number;
17782 return 1;
17783 }
17784 /* Process as normal. */
17785 return 0;
17786 }
17787
17788 void
17789 md_apply_fix (fixS * fixP,
17790 valueT * valP,
17791 segT seg)
17792 {
17793 offsetT value = * valP;
17794 offsetT newval;
17795 unsigned int newimm;
17796 unsigned long temp;
17797 int sign;
17798 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17799
17800 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17801
17802 /* Note whether this will delete the relocation. */
17803
17804 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17805 fixP->fx_done = 1;
17806
17807 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17808 consistency with the behavior on 32-bit hosts. Remember value
17809 for emit_reloc. */
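/* The xor/subtract pair below sign-extends bit 31, so a 32-bit value
   keeps its sign even when offsetT is 64 bits wide. */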
17810 value &= 0xffffffff;
17811 value ^= 0x80000000;
17812 value -= 0x80000000;
17813
17814 *valP = value;
17815 fixP->fx_addnumber = value;
17816
17817 /* Same treatment for fixP->fx_offset. */
17818 fixP->fx_offset &= 0xffffffff;
17819 fixP->fx_offset ^= 0x80000000;
17820 fixP->fx_offset -= 0x80000000;
17821
17822 switch (fixP->fx_r_type)
17823 {
17824 case BFD_RELOC_NONE:
17825 /* This will need to go in the object file. */
17826 fixP->fx_done = 0;
17827 break;
17828
17829 case BFD_RELOC_ARM_IMMEDIATE:
17830 /* We claim that this fixup has been processed here,
17831 even if in fact we generate an error because we do
17832 not have a reloc for it, so tc_gen_reloc will reject it. */
17833 fixP->fx_done = 1;
17834
17835 if (fixP->fx_addsy
17836 && ! S_IS_DEFINED (fixP->fx_addsy))
17837 {
17838 as_bad_where (fixP->fx_file, fixP->fx_line,
17839 _("undefined symbol %s used as an immediate value"),
17840 S_GET_NAME (fixP->fx_addsy));
17841 break;
17842 }
17843
17844 newimm = encode_arm_immediate (value);
17845 temp = md_chars_to_number (buf, INSN_SIZE);
17846
17847 /* If the instruction will fail, see if we can fix things up by
17848 changing the opcode. */
17849 if (newimm == (unsigned int) FAIL
17850 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17851 {
17852 as_bad_where (fixP->fx_file, fixP->fx_line,
17853 _("invalid constant (%lx) after fixup"),
17854 (unsigned long) value);
17855 break;
17856 }
17857
17858 newimm |= (temp & 0xfffff000);
17859 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17860 break;
17861
17862 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17863 {
17864 unsigned int highpart = 0;
17865 unsigned int newinsn = 0xe1a00000; /* nop. */
17866
17867 newimm = encode_arm_immediate (value);
17868 temp = md_chars_to_number (buf, INSN_SIZE);
17869
17870 /* If the instruction will fail, see if we can fix things up by
17871 changing the opcode. */
17872 if (newimm == (unsigned int) FAIL
17873 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17874 {
17875 /* No ? OK - try using two ADD instructions to generate
17876 the value. */
17877 newimm = validate_immediate_twopart (value, & highpart);
17878
17879 /* Yes - then make sure that the second instruction is
17880 also an add. */
17881 if (newimm != (unsigned int) FAIL)
17882 newinsn = temp;
17883 /* Still No ? Try using a negated value. */
17884 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17885 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17886 /* Otherwise - give up. */
17887 else
17888 {
17889 as_bad_where (fixP->fx_file, fixP->fx_line,
17890 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17891 (long) value);
17892 break;
17893 }
17894
17895 /* Replace the first operand in the 2nd instruction (which
17896 is the PC) with the destination register. We have
17897 already added in the PC in the first instruction and we
17898 do not want to do it again. */
17899 newinsn &= ~ 0xf0000;
17900 newinsn |= ((newinsn & 0x0f000) << 4);
17901 }
17902
17903 newimm |= (temp & 0xfffff000);
17904 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17905
17906 highpart |= (newinsn & 0xfffff000);
17907 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17908 }
17909 break;
17910
17911 case BFD_RELOC_ARM_OFFSET_IMM:
17912 if (!fixP->fx_done && seg->use_rela_p)
17913 value = 0;
17914
17915 case BFD_RELOC_ARM_LITERAL:
17916 sign = value >= 0;
17917
17918 if (value < 0)
17919 value = - value;
17920
17921 if (validate_offset_imm (value, 0) == FAIL)
17922 {
17923 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17924 as_bad_where (fixP->fx_file, fixP->fx_line,
17925 _("invalid literal constant: pool needs to be closer"));
17926 else
17927 as_bad_where (fixP->fx_file, fixP->fx_line,
17928 _("bad immediate value for offset (%ld)"),
17929 (long) value);
17930 break;
17931 }
17932
17933 newval = md_chars_to_number (buf, INSN_SIZE);
17934 newval &= 0xff7ff000;
17935 newval |= value | (sign ? INDEX_UP : 0);
17936 md_number_to_chars (buf, newval, INSN_SIZE);
17937 break;
17938
17939 case BFD_RELOC_ARM_OFFSET_IMM8:
17940 case BFD_RELOC_ARM_HWLITERAL:
17941 sign = value >= 0;
17942
17943 if (value < 0)
17944 value = - value;
17945
17946 if (validate_offset_imm (value, 1) == FAIL)
17947 {
17948 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17949 as_bad_where (fixP->fx_file, fixP->fx_line,
17950 _("invalid literal constant: pool needs to be closer"));
17951 else
17952 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
17953 (long) value);
17954 break;
17955 }
17956
17957 newval = md_chars_to_number (buf, INSN_SIZE);
17958 newval &= 0xff7ff0f0;
17959 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17960 md_number_to_chars (buf, newval, INSN_SIZE);
17961 break;
17962
17963 case BFD_RELOC_ARM_T32_OFFSET_U8:
17964 if (value < 0 || value > 1020 || value % 4 != 0)
17965 as_bad_where (fixP->fx_file, fixP->fx_line,
17966 _("bad immediate value for offset (%ld)"), (long) value);
17967 value /= 4;
17968
17969 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17970 newval |= value;
17971 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17972 break;
17973
17974 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17975 /* This is a complicated relocation used for all varieties of Thumb32
17976 load/store instruction with immediate offset:
17977
17978 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17979 *4, optional writeback(W)
17980 (doubleword load/store)
17981
17982 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17983 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17984 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17985 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17986 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17987
17988 Uppercase letters indicate bits that are already encoded at
17989 this point. Lowercase letters are our problem. For the
17990 second block of instructions, the secondary opcode nybble
17991 (bits 8..11) is present, and bit 23 is zero, even if this is
17992 a PC-relative operation. */
17993 newval = md_chars_to_number (buf, THUMB_SIZE);
17994 newval <<= 16;
17995 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17996
17997 if ((newval & 0xf0000000) == 0xe0000000)
17998 {
17999 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18000 if (value >= 0)
18001 newval |= (1 << 23);
18002 else
18003 value = -value;
18004 if (value % 4 != 0)
18005 {
18006 as_bad_where (fixP->fx_file, fixP->fx_line,
18007 _("offset not a multiple of 4"));
18008 break;
18009 }
18010 value /= 4;
18011 if (value > 0xff)
18012 {
18013 as_bad_where (fixP->fx_file, fixP->fx_line,
18014 _("offset out of range"));
18015 break;
18016 }
18017 newval &= ~0xff;
18018 }
18019 else if ((newval & 0x000f0000) == 0x000f0000)
18020 {
18021 /* PC-relative, 12-bit offset. */
18022 if (value >= 0)
18023 newval |= (1 << 23);
18024 else
18025 value = -value;
18026 if (value > 0xfff)
18027 {
18028 as_bad_where (fixP->fx_file, fixP->fx_line,
18029 _("offset out of range"));
18030 break;
18031 }
18032 newval &= ~0xfff;
18033 }
18034 else if ((newval & 0x00000100) == 0x00000100)
18035 {
18036 /* Writeback: 8-bit, +/- offset. */
18037 if (value >= 0)
18038 newval |= (1 << 9);
18039 else
18040 value = -value;
18041 if (value > 0xff)
18042 {
18043 as_bad_where (fixP->fx_file, fixP->fx_line,
18044 _("offset out of range"));
18045 break;
18046 }
18047 newval &= ~0xff;
18048 }
18049 else if ((newval & 0x00000f00) == 0x00000e00)
18050 {
18051 /* T-instruction: positive 8-bit offset. */
18052 if (value < 0 || value > 0xff)
18053 {
18054 as_bad_where (fixP->fx_file, fixP->fx_line,
18055 _("offset out of range"));
18056 break;
18057 }
18058 newval &= ~0xff;
18059 newval |= value;
18060 }
18061 else
18062 {
18063 /* Positive 12-bit or negative 8-bit offset. */
18064 int limit;
18065 if (value >= 0)
18066 {
18067 newval |= (1 << 23);
18068 limit = 0xfff;
18069 }
18070 else
18071 {
18072 value = -value;
18073 limit = 0xff;
18074 }
18075 if (value > limit)
18076 {
18077 as_bad_where (fixP->fx_file, fixP->fx_line,
18078 _("offset out of range"));
18079 break;
18080 }
18081 newval &= ~limit;
18082 }
18083
18084 newval |= value;
18085 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18086 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18087 break;
18088
18089 case BFD_RELOC_ARM_SHIFT_IMM:
18090 newval = md_chars_to_number (buf, INSN_SIZE);
18091 if (((unsigned long) value) > 32
18092 || (value == 32
18093 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18094 {
18095 as_bad_where (fixP->fx_file, fixP->fx_line,
18096 _("shift expression is too large"));
18097 break;
18098 }
18099
18100 if (value == 0)
18101 /* Shifts of zero must be done as lsl. */
18102 newval &= ~0x60;
18103 else if (value == 32)
18104 value = 0;
18105 newval &= 0xfffff07f;
18106 newval |= (value & 0x1f) << 7;
18107 md_number_to_chars (buf, newval, INSN_SIZE);
18108 break;
18109
18110 case BFD_RELOC_ARM_T32_IMMEDIATE:
18111 case BFD_RELOC_ARM_T32_ADD_IMM:
18112 case BFD_RELOC_ARM_T32_IMM12:
18113 case BFD_RELOC_ARM_T32_ADD_PC12:
18114 /* We claim that this fixup has been processed here,
18115 even if in fact we generate an error because we do
18116 not have a reloc for it, so tc_gen_reloc will reject it. */
18117 fixP->fx_done = 1;
18118
18119 if (fixP->fx_addsy
18120 && ! S_IS_DEFINED (fixP->fx_addsy))
18121 {
18122 as_bad_where (fixP->fx_file, fixP->fx_line,
18123 _("undefined symbol %s used as an immediate value"),
18124 S_GET_NAME (fixP->fx_addsy));
18125 break;
18126 }
18127
18128 newval = md_chars_to_number (buf, THUMB_SIZE);
18129 newval <<= 16;
18130 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18131
18132 newimm = FAIL;
18133 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18134 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18135 {
18136 newimm = encode_thumb32_immediate (value);
18137 if (newimm == (unsigned int) FAIL)
18138 newimm = thumb32_negate_data_op (&newval, value);
18139 }
18140 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18141 && newimm == (unsigned int) FAIL)
18142 {
18143 /* Turn add/sub into addw/subw. */
18144 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18145 newval = (newval & 0xfeffffff) | 0x02000000;
18146
18147 /* 12 bit immediate for addw/subw. */
18148 if (value < 0)
18149 {
18150 value = -value;
18151 newval ^= 0x00a00000;
18152 }
18153 if (value > 0xfff)
18154 newimm = (unsigned int) FAIL;
18155 else
18156 newimm = value;
18157 }
18158
18159 if (newimm == (unsigned int)FAIL)
18160 {
18161 as_bad_where (fixP->fx_file, fixP->fx_line,
18162 _("invalid constant (%lx) after fixup"),
18163 (unsigned long) value);
18164 break;
18165 }
18166
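/* Insert the encoded immediate: the i bit goes in bit 26 of the combined opcode, imm3 in bits 14..12 and imm8 in bits 7..0. */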
18167 newval |= (newimm & 0x800) << 15;
18168 newval |= (newimm & 0x700) << 4;
18169 newval |= (newimm & 0x0ff);
18170
18171 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18172 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18173 break;
18174
18175 case BFD_RELOC_ARM_SMC:
18176 if (((unsigned long) value) > 0xffff)
18177 as_bad_where (fixP->fx_file, fixP->fx_line,
18178 _("invalid smc expression"));
18179 newval = md_chars_to_number (buf, INSN_SIZE);
18180 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18181 md_number_to_chars (buf, newval, INSN_SIZE);
18182 break;
18183
18184 case BFD_RELOC_ARM_SWI:
18185 if (fixP->tc_fix_data != 0)
18186 {
18187 if (((unsigned long) value) > 0xff)
18188 as_bad_where (fixP->fx_file, fixP->fx_line,
18189 _("invalid swi expression"));
18190 newval = md_chars_to_number (buf, THUMB_SIZE);
18191 newval |= value;
18192 md_number_to_chars (buf, newval, THUMB_SIZE);
18193 }
18194 else
18195 {
18196 if (((unsigned long) value) > 0x00ffffff)
18197 as_bad_where (fixP->fx_file, fixP->fx_line,
18198 _("invalid swi expression"));
18199 newval = md_chars_to_number (buf, INSN_SIZE);
18200 newval |= value;
18201 md_number_to_chars (buf, newval, INSN_SIZE);
18202 }
18203 break;
18204
18205 case BFD_RELOC_ARM_MULTI:
18206 if (((unsigned long) value) > 0xffff)
18207 as_bad_where (fixP->fx_file, fixP->fx_line,
18208 _("invalid expression in load/store multiple"));
18209 newval = value | md_chars_to_number (buf, INSN_SIZE);
18210 md_number_to_chars (buf, newval, INSN_SIZE);
18211 break;
18212
18213 #ifdef OBJ_ELF
18214 case BFD_RELOC_ARM_PCREL_CALL:
18215 newval = md_chars_to_number (buf, INSN_SIZE);
18216 if ((newval & 0xf0000000) == 0xf0000000)
18217 temp = 1;
18218 else
18219 temp = 3;
18220 goto arm_branch_common;
18221
18222 case BFD_RELOC_ARM_PCREL_JUMP:
18223 case BFD_RELOC_ARM_PLT32:
18224 #endif
18225 case BFD_RELOC_ARM_PCREL_BRANCH:
18226 temp = 3;
18227 goto arm_branch_common;
18228
18229 case BFD_RELOC_ARM_PCREL_BLX:
18230 temp = 1;
18231 arm_branch_common:
18232 /* We are going to store value (shifted right by two) in the
18233 instruction, in a 24-bit signed field. Bits 26 through 32 must be
18234 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
18235 must also be clear. */
18236 if (value & temp)
18237 as_bad_where (fixP->fx_file, fixP->fx_line,
18238 _("misaligned branch destination"));
18239 if ((value & (offsetT)0xfe000000) != (offsetT)0
18240 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18241 as_bad_where (fixP->fx_file, fixP->fx_line,
18242 _("branch out of range"));
18243
18244 if (fixP->fx_done || !seg->use_rela_p)
18245 {
18246 newval = md_chars_to_number (buf, INSN_SIZE);
18247 newval |= (value >> 2) & 0x00ffffff;
18248 /* Set the H bit on BLX instructions. */
18249 if (temp == 1)
18250 {
18251 if (value & 2)
18252 newval |= 0x01000000;
18253 else
18254 newval &= ~0x01000000;
18255 }
18256 md_number_to_chars (buf, newval, INSN_SIZE);
18257 }
18258 break;
18259
18260 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18261 /* CBZ can only branch forward. */
18262
18263 /* Attempts to use CBZ to branch to the next instruction
18264 (which, strictly speaking, are prohibited) will be turned into
18265 no-ops.
18266
18267 FIXME: It may be better to remove the instruction completely and
18268 perform relaxation. */
18269 if (value == -2)
18270 {
18271 newval = md_chars_to_number (buf, THUMB_SIZE);
18272 newval = 0xbf00; /* NOP encoding T1 */
18273 md_number_to_chars (buf, newval, THUMB_SIZE);
18274 }
18275 else
18276 {
18277 if (value & ~0x7e)
18278 as_bad_where (fixP->fx_file, fixP->fx_line,
18279 _("branch out of range"));
18280
18281 if (fixP->fx_done || !seg->use_rela_p)
18282 {
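/* CB{N}Z encoding: imm5 (offset bits 5..1) goes in bits 7..3 and the i bit (offset bit 6) in bit 9. */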
18283 newval = md_chars_to_number (buf, THUMB_SIZE);
18284 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18285 md_number_to_chars (buf, newval, THUMB_SIZE);
18286 }
18287 }
18288 break;
18289
18290 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18291 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18292 as_bad_where (fixP->fx_file, fixP->fx_line,
18293 _("branch out of range"));
18294
18295 if (fixP->fx_done || !seg->use_rela_p)
18296 {
18297 newval = md_chars_to_number (buf, THUMB_SIZE);
18298 newval |= (value & 0x1ff) >> 1;
18299 md_number_to_chars (buf, newval, THUMB_SIZE);
18300 }
18301 break;
18302
18303 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18304 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18305 as_bad_where (fixP->fx_file, fixP->fx_line,
18306 _("branch out of range"));
18307
18308 if (fixP->fx_done || !seg->use_rela_p)
18309 {
18310 newval = md_chars_to_number (buf, THUMB_SIZE);
18311 newval |= (value & 0xfff) >> 1;
18312 md_number_to_chars (buf, newval, THUMB_SIZE);
18313 }
18314 break;
18315
18316 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18317 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18318 as_bad_where (fixP->fx_file, fixP->fx_line,
18319 _("conditional branch out of range"));
18320
18321 if (fixP->fx_done || !seg->use_rela_p)
18322 {
18323 offsetT newval2;
18324 addressT S, J1, J2, lo, hi;
18325
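/* Conditional branch (encoding T3): the displacement is S:J2:J1:imm6:imm11:'0'. */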
18326 S = (value & 0x00100000) >> 20;
18327 J2 = (value & 0x00080000) >> 19;
18328 J1 = (value & 0x00040000) >> 18;
18329 hi = (value & 0x0003f000) >> 12;
18330 lo = (value & 0x00000ffe) >> 1;
18331
18332 newval = md_chars_to_number (buf, THUMB_SIZE);
18333 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18334 newval |= (S << 10) | hi;
18335 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18336 md_number_to_chars (buf, newval, THUMB_SIZE);
18337 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18338 }
18339 break;
18340
18341 case BFD_RELOC_THUMB_PCREL_BLX:
18342 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18343 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18344 as_bad_where (fixP->fx_file, fixP->fx_line,
18345 _("branch out of range"));
18346
18347 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18348 /* For a BLX instruction, make sure that the relocation is rounded up
18349 to a word boundary. This follows the semantics of the instruction
18350 which specifies that bit 1 of the target address will come from bit
18351 1 of the base address. */
18352 value = (value + 1) & ~ 1;
18353
18354 if (fixP->fx_done || !seg->use_rela_p)
18355 {
18356 offsetT newval2;
18357
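/* Bits 22..12 of the offset go in the first halfword, bits 11..1 in the second. */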
18358 newval = md_chars_to_number (buf, THUMB_SIZE);
18359 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18360 newval |= (value & 0x7fffff) >> 12;
18361 newval2 |= (value & 0xfff) >> 1;
18362 md_number_to_chars (buf, newval, THUMB_SIZE);
18363 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18364 }
18365 break;
18366
18367 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18368 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18369 as_bad_where (fixP->fx_file, fixP->fx_line,
18370 _("branch out of range"));
18371
18372 if (fixP->fx_done || !seg->use_rela_p)
18373 {
18374 offsetT newval2;
18375 addressT S, I1, I2, lo, hi;
18376
18377 S = (value & 0x01000000) >> 24;
18378 I1 = (value & 0x00800000) >> 23;
18379 I2 = (value & 0x00400000) >> 22;
18380 hi = (value & 0x003ff000) >> 12;
18381 lo = (value & 0x00000ffe) >> 1;
18382
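/* The instruction stores J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S). */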
18383 I1 = !(I1 ^ S);
18384 I2 = !(I2 ^ S);
18385
18386 newval = md_chars_to_number (buf, THUMB_SIZE);
18387 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18388 newval |= (S << 10) | hi;
18389 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18390 md_number_to_chars (buf, newval, THUMB_SIZE);
18391 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18392 }
18393 break;
18394
18395 case BFD_RELOC_8:
18396 if (fixP->fx_done || !seg->use_rela_p)
18397 md_number_to_chars (buf, value, 1);
18398 break;
18399
18400 case BFD_RELOC_16:
18401 if (fixP->fx_done || !seg->use_rela_p)
18402 md_number_to_chars (buf, value, 2);
18403 break;
18404
18405 #ifdef OBJ_ELF
18406 case BFD_RELOC_ARM_TLS_GD32:
18407 case BFD_RELOC_ARM_TLS_LE32:
18408 case BFD_RELOC_ARM_TLS_IE32:
18409 case BFD_RELOC_ARM_TLS_LDM32:
18410 case BFD_RELOC_ARM_TLS_LDO32:
18411 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18412 /* fall through */
18413
18414 case BFD_RELOC_ARM_GOT32:
18415 case BFD_RELOC_ARM_GOTOFF:
18416 case BFD_RELOC_ARM_TARGET2:
18417 if (fixP->fx_done || !seg->use_rela_p)
18418 md_number_to_chars (buf, 0, 4);
18419 break;
18420 #endif
18421
18422 case BFD_RELOC_RVA:
18423 case BFD_RELOC_32:
18424 case BFD_RELOC_ARM_TARGET1:
18425 case BFD_RELOC_ARM_ROSEGREL32:
18426 case BFD_RELOC_ARM_SBREL32:
18427 case BFD_RELOC_32_PCREL:
18428 #ifdef TE_PE
18429 case BFD_RELOC_32_SECREL:
18430 #endif
18431 if (fixP->fx_done || !seg->use_rela_p)
18432 #ifdef TE_WINCE
18433 /* For WinCE we only do this for pcrel fixups. */
18434 if (fixP->fx_done || fixP->fx_pcrel)
18435 #endif
18436 md_number_to_chars (buf, value, 4);
18437 break;
18438
18439 #ifdef OBJ_ELF
18440 case BFD_RELOC_ARM_PREL31:
18441 if (fixP->fx_done || !seg->use_rela_p)
18442 {
18443 newval = md_chars_to_number (buf, 4) & 0x80000000;
18444 if ((value ^ (value >> 1)) & 0x40000000)
18445 {
18446 as_bad_where (fixP->fx_file, fixP->fx_line,
18447 _("rel31 relocation overflow"));
18448 }
18449 newval |= value & 0x7fffffff;
18450 md_number_to_chars (buf, newval, 4);
18451 }
18452 break;
18453 #endif
18454
18455 case BFD_RELOC_ARM_CP_OFF_IMM:
18456 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18457 if (value < -1023 || value > 1023 || (value & 3))
18458 as_bad_where (fixP->fx_file, fixP->fx_line,
18459 _("co-processor offset out of range"));
18460 cp_off_common:
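/* Store the offset, divided by four, in bits 7..0; the sign selects the U (add/subtract) bit. */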
18461 sign = value >= 0;
18462 if (value < 0)
18463 value = -value;
18464 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18465 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18466 newval = md_chars_to_number (buf, INSN_SIZE);
18467 else
18468 newval = get_thumb32_insn (buf);
18469 newval &= 0xff7fff00;
18470 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18471 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18472 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18473 md_number_to_chars (buf, newval, INSN_SIZE);
18474 else
18475 put_thumb32_insn (buf, newval);
18476 break;
18477
18478 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18479 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18480 if (value < -255 || value > 255)
18481 as_bad_where (fixP->fx_file, fixP->fx_line,
18482 _("co-processor offset out of range"));
18483 value *= 4;
18484 goto cp_off_common;
18485
18486 case BFD_RELOC_ARM_THUMB_OFFSET:
18487 newval = md_chars_to_number (buf, THUMB_SIZE);
18488 /* Exactly what ranges are allowed, and where the offset is inserted,
18489 depend on the type of instruction; we can establish this from the
18490 top 4 bits. */
18491 switch (newval >> 12)
18492 {
18493 case 4: /* PC load. */
18494 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
18495 forced to zero for these loads; md_pcrel_from has already
18496 compensated for this. */
18497 if (value & 3)
18498 as_bad_where (fixP->fx_file, fixP->fx_line,
18499 _("invalid offset, target not word aligned (0x%08lX)"),
18500 (((unsigned long) fixP->fx_frag->fr_address
18501 + (unsigned long) fixP->fx_where) & ~3)
18502 + (unsigned long) value);
18503
18504 if (value & ~0x3fc)
18505 as_bad_where (fixP->fx_file, fixP->fx_line,
18506 _("invalid offset, value too big (0x%08lX)"),
18507 (long) value);
18508
18509 newval |= value >> 2;
18510 break;
18511
18512 case 9: /* SP load/store. */
18513 if (value & ~0x3fc)
18514 as_bad_where (fixP->fx_file, fixP->fx_line,
18515 _("invalid offset, value too big (0x%08lX)"),
18516 (long) value);
18517 newval |= value >> 2;
18518 break;
18519
18520 case 6: /* Word load/store. */
18521 if (value & ~0x7c)
18522 as_bad_where (fixP->fx_file, fixP->fx_line,
18523 _("invalid offset, value too big (0x%08lX)"),
18524 (long) value);
18525 newval |= value << 4; /* 6 - 2. */
18526 break;
18527
18528 case 7: /* Byte load/store. */
18529 if (value & ~0x1f)
18530 as_bad_where (fixP->fx_file, fixP->fx_line,
18531 _("invalid offset, value too big (0x%08lX)"),
18532 (long) value);
18533 newval |= value << 6;
18534 break;
18535
18536 case 8: /* Halfword load/store. */
18537 if (value & ~0x3e)
18538 as_bad_where (fixP->fx_file, fixP->fx_line,
18539 _("invalid offset, value too big (0x%08lX)"),
18540 (long) value);
18541 newval |= value << 5; /* 6 - 1. */
18542 break;
18543
18544 default:
18545 as_bad_where (fixP->fx_file, fixP->fx_line,
18546 "Unable to process relocation for thumb opcode: %lx",
18547 (unsigned long) newval);
18548 break;
18549 }
18550 md_number_to_chars (buf, newval, THUMB_SIZE);
18551 break;
18552
18553 case BFD_RELOC_ARM_THUMB_ADD:
18554 /* This is a complicated relocation, since we use it for all of
18555 the following immediate relocations:
18556
18557 3bit ADD/SUB
18558 8bit ADD/SUB
18559 9bit ADD/SUB SP word-aligned
18560 10bit ADD PC/SP word-aligned
18561
18562 The type of instruction being processed is encoded in the
18563 instruction field:
18564
18565 0x8000 SUB
18566 0x00F0 Rd
18567 0x000F Rs
18568 */
18569 newval = md_chars_to_number (buf, THUMB_SIZE);
18570 {
18571 int rd = (newval >> 4) & 0xf;
18572 int rs = newval & 0xf;
18573 int subtract = !!(newval & 0x8000);
18574
18575 /* Check for HI regs; only very restricted cases are allowed:
18576 adjusting SP, and using PC or SP to get an address. */
18577 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18578 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18579 as_bad_where (fixP->fx_file, fixP->fx_line,
18580 _("invalid Hi register with immediate"));
18581
18582 /* If value is negative, choose the opposite instruction. */
18583 if (value < 0)
18584 {
18585 value = -value;
18586 subtract = !subtract;
18587 if (value < 0)
18588 as_bad_where (fixP->fx_file, fixP->fx_line,
18589 _("immediate value out of range"));
18590 }
18591
18592 if (rd == REG_SP)
18593 {
18594 if (value & ~0x1fc)
18595 as_bad_where (fixP->fx_file, fixP->fx_line,
18596 _("invalid immediate for stack address calculation"));
18597 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18598 newval |= value >> 2;
18599 }
18600 else if (rs == REG_PC || rs == REG_SP)
18601 {
18602 if (subtract || value & ~0x3fc)
18603 as_bad_where (fixP->fx_file, fixP->fx_line,
18604 _("invalid immediate for address calculation (value = 0x%08lX)"),
18605 (unsigned long) value);
18606 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18607 newval |= rd << 8;
18608 newval |= value >> 2;
18609 }
18610 else if (rs == rd)
18611 {
18612 if (value & ~0xff)
18613 as_bad_where (fixP->fx_file, fixP->fx_line,
18614 _("immediate value out of range"));
18615 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18616 newval |= (rd << 8) | value;
18617 }
18618 else
18619 {
18620 if (value & ~0x7)
18621 as_bad_where (fixP->fx_file, fixP->fx_line,
18622 _("immediate value out of range"));
18623 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18624 newval |= rd | (rs << 3) | (value << 6);
18625 }
18626 }
18627 md_number_to_chars (buf, newval, THUMB_SIZE);
18628 break;
18629
18630 case BFD_RELOC_ARM_THUMB_IMM:
18631 newval = md_chars_to_number (buf, THUMB_SIZE);
18632 if (value < 0 || value > 255)
18633 as_bad_where (fixP->fx_file, fixP->fx_line,
18634 _("invalid immediate: %ld is too large"),
18635 (long) value);
18636 newval |= value;
18637 md_number_to_chars (buf, newval, THUMB_SIZE);
18638 break;
18639
18640 case BFD_RELOC_ARM_THUMB_SHIFT:
18641 /* 5bit shift value (0..32). LSL cannot take 32. */
18642 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18643 temp = newval & 0xf800;
18644 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18645 as_bad_where (fixP->fx_file, fixP->fx_line,
18646 _("invalid shift value: %ld"), (long) value);
18647 /* Shifts of zero must be encoded as LSL. */
18648 if (value == 0)
18649 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18650 /* Shifts of 32 are encoded as zero. */
18651 else if (value == 32)
18652 value = 0;
18653 newval |= value << 6;
18654 md_number_to_chars (buf, newval, THUMB_SIZE);
18655 break;
18656
18657 case BFD_RELOC_VTABLE_INHERIT:
18658 case BFD_RELOC_VTABLE_ENTRY:
18659 fixP->fx_done = 0;
18660 return;
18661
18662 case BFD_RELOC_ARM_MOVW:
18663 case BFD_RELOC_ARM_MOVT:
18664 case BFD_RELOC_ARM_THUMB_MOVW:
18665 case BFD_RELOC_ARM_THUMB_MOVT:
18666 if (fixP->fx_done || !seg->use_rela_p)
18667 {
18668 /* REL format relocations are limited to a 16-bit addend. */
18669 if (!fixP->fx_done)
18670 {
18671 if (value < -0x1000 || value > 0xffff)
18672 as_bad_where (fixP->fx_file, fixP->fx_line,
18673 _("offset too big"));
18674 }
18675 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18676 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18677 {
18678 value >>= 16;
18679 }
18680
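/* The Thumb encodings split the 16-bit value into imm4:i:imm3:imm8; the ARM encodings below use imm4:imm12. */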
18681 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18682 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18683 {
18684 newval = get_thumb32_insn (buf);
18685 newval &= 0xfbf08f00;
18686 newval |= (value & 0xf000) << 4;
18687 newval |= (value & 0x0800) << 15;
18688 newval |= (value & 0x0700) << 4;
18689 newval |= (value & 0x00ff);
18690 put_thumb32_insn (buf, newval);
18691 }
18692 else
18693 {
18694 newval = md_chars_to_number (buf, 4);
18695 newval &= 0xfff0f000;
18696 newval |= value & 0x0fff;
18697 newval |= (value & 0xf000) << 4;
18698 md_number_to_chars (buf, newval, 4);
18699 }
18700 }
18701 return;
18702
18703 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18704 case BFD_RELOC_ARM_ALU_PC_G0:
18705 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18706 case BFD_RELOC_ARM_ALU_PC_G1:
18707 case BFD_RELOC_ARM_ALU_PC_G2:
18708 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18709 case BFD_RELOC_ARM_ALU_SB_G0:
18710 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18711 case BFD_RELOC_ARM_ALU_SB_G1:
18712 case BFD_RELOC_ARM_ALU_SB_G2:
18713 assert (!fixP->fx_done);
18714 if (!seg->use_rela_p)
18715 {
18716 bfd_vma insn;
18717 bfd_vma encoded_addend;
18718 bfd_vma addend_abs = abs (value);
18719
18720 /* Check that the absolute value of the addend can be
18721 expressed as an 8-bit constant plus a rotation. */
18722 encoded_addend = encode_arm_immediate (addend_abs);
18723 if (encoded_addend == (unsigned int) FAIL)
18724 as_bad_where (fixP->fx_file, fixP->fx_line,
18725 _("the offset 0x%08lX is not representable"),
18726 addend_abs);
18727
18728 /* Extract the instruction. */
18729 insn = md_chars_to_number (buf, INSN_SIZE);
18730
18731 /* If the addend is positive, use an ADD instruction.
18732 Otherwise use a SUB. Take care not to destroy the S bit. */
18733 insn &= 0xff1fffff;
18734 if (value < 0)
18735 insn |= 1 << 22;
18736 else
18737 insn |= 1 << 23;
18738
18739 /* Place the encoded addend into the first 12 bits of the
18740 instruction. */
18741 insn &= 0xfffff000;
18742 insn |= encoded_addend;
18743
18744 /* Update the instruction. */
18745 md_number_to_chars (buf, insn, INSN_SIZE);
18746 }
18747 break;
18748
18749 case BFD_RELOC_ARM_LDR_PC_G0:
18750 case BFD_RELOC_ARM_LDR_PC_G1:
18751 case BFD_RELOC_ARM_LDR_PC_G2:
18752 case BFD_RELOC_ARM_LDR_SB_G0:
18753 case BFD_RELOC_ARM_LDR_SB_G1:
18754 case BFD_RELOC_ARM_LDR_SB_G2:
18755 assert (!fixP->fx_done);
18756 if (!seg->use_rela_p)
18757 {
18758 bfd_vma insn;
18759 bfd_vma addend_abs = abs (value);
18760
18761 /* Check that the absolute value of the addend can be
18762 encoded in 12 bits. */
18763 if (addend_abs >= 0x1000)
18764 as_bad_where (fixP->fx_file, fixP->fx_line,
18765 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18766 addend_abs);
18767
18768 /* Extract the instruction. */
18769 insn = md_chars_to_number (buf, INSN_SIZE);
18770
18771 /* If the addend is negative, clear bit 23 of the instruction.
18772 Otherwise set it. */
18773 if (value < 0)
18774 insn &= ~(1 << 23);
18775 else
18776 insn |= 1 << 23;
18777
18778 /* Place the absolute value of the addend into the first 12 bits
18779 of the instruction. */
18780 insn &= 0xfffff000;
18781 insn |= addend_abs;
18782
18783 /* Update the instruction. */
18784 md_number_to_chars (buf, insn, INSN_SIZE);
18785 }
18786 break;
18787
18788 case BFD_RELOC_ARM_LDRS_PC_G0:
18789 case BFD_RELOC_ARM_LDRS_PC_G1:
18790 case BFD_RELOC_ARM_LDRS_PC_G2:
18791 case BFD_RELOC_ARM_LDRS_SB_G0:
18792 case BFD_RELOC_ARM_LDRS_SB_G1:
18793 case BFD_RELOC_ARM_LDRS_SB_G2:
18794 assert (!fixP->fx_done);
18795 if (!seg->use_rela_p)
18796 {
18797 bfd_vma insn;
18798 bfd_vma addend_abs = abs (value);
18799
18800 /* Check that the absolute value of the addend can be
18801 encoded in 8 bits. */
18802 if (addend_abs >= 0x100)
18803 as_bad_where (fixP->fx_file, fixP->fx_line,
18804 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18805 addend_abs);
18806
18807 /* Extract the instruction. */
18808 insn = md_chars_to_number (buf, INSN_SIZE);
18809
18810 /* If the addend is negative, clear bit 23 of the instruction.
18811 Otherwise set it. */
18812 if (value < 0)
18813 insn &= ~(1 << 23);
18814 else
18815 insn |= 1 << 23;
18816
18817 /* Place the first four bits of the absolute value of the addend
18818 into the first 4 bits of the instruction, and the remaining
18819 four into bits 8 .. 11. */
18820 insn &= 0xfffff0f0;
18821 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18822
18823 /* Update the instruction. */
18824 md_number_to_chars (buf, insn, INSN_SIZE);
18825 }
18826 break;
18827
18828 case BFD_RELOC_ARM_LDC_PC_G0:
18829 case BFD_RELOC_ARM_LDC_PC_G1:
18830 case BFD_RELOC_ARM_LDC_PC_G2:
18831 case BFD_RELOC_ARM_LDC_SB_G0:
18832 case BFD_RELOC_ARM_LDC_SB_G1:
18833 case BFD_RELOC_ARM_LDC_SB_G2:
18834 assert (!fixP->fx_done);
18835 if (!seg->use_rela_p)
18836 {
18837 bfd_vma insn;
18838 bfd_vma addend_abs = abs (value);
18839
18840 /* Check that the absolute value of the addend is a multiple of
18841 four and, when divided by four, fits in 8 bits. */
18842 if (addend_abs & 0x3)
18843 as_bad_where (fixP->fx_file, fixP->fx_line,
18844 _("bad offset 0x%08lX (must be word-aligned)"),
18845 addend_abs);
18846
18847 if ((addend_abs >> 2) > 0xff)
18848 as_bad_where (fixP->fx_file, fixP->fx_line,
18849 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18850 addend_abs);
18851
18852 /* Extract the instruction. */
18853 insn = md_chars_to_number (buf, INSN_SIZE);
18854
18855 /* If the addend is negative, clear bit 23 of the instruction.
18856 Otherwise set it. */
18857 if (value < 0)
18858 insn &= ~(1 << 23);
18859 else
18860 insn |= 1 << 23;
18861
18862 /* Place the addend (divided by four) into the first eight
18863 bits of the instruction. */
18864 insn &= 0xfffffff0;
18865 insn |= addend_abs >> 2;
18866
18867 /* Update the instruction. */
18868 md_number_to_chars (buf, insn, INSN_SIZE);
18869 }
18870 break;
18871
18872 case BFD_RELOC_UNUSED:
18873 default:
18874 as_bad_where (fixP->fx_file, fixP->fx_line,
18875 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18876 }
18877 }
18878
18879 /* Translate internal representation of relocation info to BFD target
18880 format. */
18881
18882 arelent *
18883 tc_gen_reloc (asection *section, fixS *fixp)
18884 {
18885 arelent * reloc;
18886 bfd_reloc_code_real_type code;
18887
18888 reloc = xmalloc (sizeof (arelent));
18889
18890 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18891 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18892 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18893
18894 if (fixp->fx_pcrel)
18895 {
18896 if (section->use_rela_p)
18897 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18898 else
18899 fixp->fx_offset = reloc->address;
18900 }
18901 reloc->addend = fixp->fx_offset;
18902
18903 switch (fixp->fx_r_type)
18904 {
18905 case BFD_RELOC_8:
18906 if (fixp->fx_pcrel)
18907 {
18908 code = BFD_RELOC_8_PCREL;
18909 break;
18910 }
18911
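/* Fall through. */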
18912 case BFD_RELOC_16:
18913 if (fixp->fx_pcrel)
18914 {
18915 code = BFD_RELOC_16_PCREL;
18916 break;
18917 }
18918
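/* Fall through. */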
18919 case BFD_RELOC_32:
18920 if (fixp->fx_pcrel)
18921 {
18922 code = BFD_RELOC_32_PCREL;
18923 break;
18924 }
18925
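/* Fall through. */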
18926 case BFD_RELOC_ARM_MOVW:
18927 if (fixp->fx_pcrel)
18928 {
18929 code = BFD_RELOC_ARM_MOVW_PCREL;
18930 break;
18931 }
18932
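/* Fall through. */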
18933 case BFD_RELOC_ARM_MOVT:
18934 if (fixp->fx_pcrel)
18935 {
18936 code = BFD_RELOC_ARM_MOVT_PCREL;
18937 break;
18938 }
18939
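/* Fall through. */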
18940 case BFD_RELOC_ARM_THUMB_MOVW:
18941 if (fixp->fx_pcrel)
18942 {
18943 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18944 break;
18945 }
18946
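/* Fall through. */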
18947 case BFD_RELOC_ARM_THUMB_MOVT:
18948 if (fixp->fx_pcrel)
18949 {
18950 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18951 break;
18952 }
18953
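/* Fall through. */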
18954 case BFD_RELOC_NONE:
18955 case BFD_RELOC_ARM_PCREL_BRANCH:
18956 case BFD_RELOC_ARM_PCREL_BLX:
18957 case BFD_RELOC_RVA:
18958 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18959 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18960 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18961 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18962 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18963 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18964 case BFD_RELOC_THUMB_PCREL_BLX:
18965 case BFD_RELOC_VTABLE_ENTRY:
18966 case BFD_RELOC_VTABLE_INHERIT:
18967 #ifdef TE_PE
18968 case BFD_RELOC_32_SECREL:
18969 #endif
18970 code = fixp->fx_r_type;
18971 break;
18972
18973 case BFD_RELOC_ARM_LITERAL:
18974 case BFD_RELOC_ARM_HWLITERAL:
18975 /* If this is called then a literal has been
18976 referenced across a section boundary. */
18977 as_bad_where (fixp->fx_file, fixp->fx_line,
18978 _("literal referenced across section boundary"));
18979 return NULL;
18980
18981 #ifdef OBJ_ELF
18982 case BFD_RELOC_ARM_GOT32:
18983 case BFD_RELOC_ARM_GOTOFF:
18984 case BFD_RELOC_ARM_PLT32:
18985 case BFD_RELOC_ARM_TARGET1:
18986 case BFD_RELOC_ARM_ROSEGREL32:
18987 case BFD_RELOC_ARM_SBREL32:
18988 case BFD_RELOC_ARM_PREL31:
18989 case BFD_RELOC_ARM_TARGET2:
18990 case BFD_RELOC_ARM_TLS_LE32:
18991 case BFD_RELOC_ARM_TLS_LDO32:
18992 case BFD_RELOC_ARM_PCREL_CALL:
18993 case BFD_RELOC_ARM_PCREL_JUMP:
18994 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18995 case BFD_RELOC_ARM_ALU_PC_G0:
18996 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18997 case BFD_RELOC_ARM_ALU_PC_G1:
18998 case BFD_RELOC_ARM_ALU_PC_G2:
18999 case BFD_RELOC_ARM_LDR_PC_G0:
19000 case BFD_RELOC_ARM_LDR_PC_G1:
19001 case BFD_RELOC_ARM_LDR_PC_G2:
19002 case BFD_RELOC_ARM_LDRS_PC_G0:
19003 case BFD_RELOC_ARM_LDRS_PC_G1:
19004 case BFD_RELOC_ARM_LDRS_PC_G2:
19005 case BFD_RELOC_ARM_LDC_PC_G0:
19006 case BFD_RELOC_ARM_LDC_PC_G1:
19007 case BFD_RELOC_ARM_LDC_PC_G2:
19008 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19009 case BFD_RELOC_ARM_ALU_SB_G0:
19010 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19011 case BFD_RELOC_ARM_ALU_SB_G1:
19012 case BFD_RELOC_ARM_ALU_SB_G2:
19013 case BFD_RELOC_ARM_LDR_SB_G0:
19014 case BFD_RELOC_ARM_LDR_SB_G1:
19015 case BFD_RELOC_ARM_LDR_SB_G2:
19016 case BFD_RELOC_ARM_LDRS_SB_G0:
19017 case BFD_RELOC_ARM_LDRS_SB_G1:
19018 case BFD_RELOC_ARM_LDRS_SB_G2:
19019 case BFD_RELOC_ARM_LDC_SB_G0:
19020 case BFD_RELOC_ARM_LDC_SB_G1:
19021 case BFD_RELOC_ARM_LDC_SB_G2:
19022 code = fixp->fx_r_type;
19023 break;
19024
19025 case BFD_RELOC_ARM_TLS_GD32:
19026 case BFD_RELOC_ARM_TLS_IE32:
19027 case BFD_RELOC_ARM_TLS_LDM32:
19028 /* BFD will include the symbol's address in the addend.
19029 But we don't want that, so subtract it out again here. */
19030 if (!S_IS_COMMON (fixp->fx_addsy))
19031 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19032 code = fixp->fx_r_type;
19033 break;
19034 #endif
19035
19036 case BFD_RELOC_ARM_IMMEDIATE:
19037 as_bad_where (fixp->fx_file, fixp->fx_line,
19038 _("internal relocation (type: IMMEDIATE) not fixed up"));
19039 return NULL;
19040
19041 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19042 as_bad_where (fixp->fx_file, fixp->fx_line,
19043 _("ADRL used for a symbol not defined in the same file"));
19044 return NULL;
19045
19046 case BFD_RELOC_ARM_OFFSET_IMM:
19047 if (section->use_rela_p)
19048 {
19049 code = fixp->fx_r_type;
19050 break;
19051 }
19052
19053 if (fixp->fx_addsy != NULL
19054 && !S_IS_DEFINED (fixp->fx_addsy)
19055 && S_IS_LOCAL (fixp->fx_addsy))
19056 {
19057 as_bad_where (fixp->fx_file, fixp->fx_line,
19058 _("undefined local label `%s'"),
19059 S_GET_NAME (fixp->fx_addsy));
19060 return NULL;
19061 }
19062
19063 as_bad_where (fixp->fx_file, fixp->fx_line,
19064 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19065 return NULL;
19066
19067 default:
19068 {
19069 char * type;
19070
19071 switch (fixp->fx_r_type)
19072 {
19073 case BFD_RELOC_NONE: type = "NONE"; break;
19074 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19075 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19076 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19077 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19078 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19079 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19080 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19081 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19082 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19083 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19084 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19085 default: type = _("<unknown>"); break;
19086 }
19087 as_bad_where (fixp->fx_file, fixp->fx_line,
19088 _("cannot represent %s relocation in this object file format"),
19089 type);
19090 return NULL;
19091 }
19092 }
19093
19094 #ifdef OBJ_ELF
19095 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19096 && GOT_symbol
19097 && fixp->fx_addsy == GOT_symbol)
19098 {
19099 code = BFD_RELOC_ARM_GOTPC;
19100 reloc->addend = fixp->fx_offset = reloc->address;
19101 }
19102 #endif
19103
19104 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19105
19106 if (reloc->howto == NULL)
19107 {
19108 as_bad_where (fixp->fx_file, fixp->fx_line,
19109 _("cannot represent %s relocation in this object file format"),
19110 bfd_get_reloc_code_name (code));
19111 return NULL;
19112 }
19113
19114 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19115 vtable entry to be used in the relocation's section offset. */
19116 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19117 reloc->address = fixp->fx_offset;
19118
19119 return reloc;
19120 }
19121
19122 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19123
19124 void
19125 cons_fix_new_arm (fragS * frag,
19126 int where,
19127 int size,
19128 expressionS * exp)
19129 {
19130 bfd_reloc_code_real_type type;
19131 int pcrel = 0;
19132
19133 /* Pick a reloc.
19134 FIXME: @@ Should look at CPU word size. */
19135 switch (size)
19136 {
19137 case 1:
19138 type = BFD_RELOC_8;
19139 break;
19140 case 2:
19141 type = BFD_RELOC_16;
19142 break;
19143 case 4:
19144 default:
19145 type = BFD_RELOC_32;
19146 break;
19147 case 8:
19148 type = BFD_RELOC_64;
19149 break;
19150 }
19151
19152 #ifdef TE_PE
19153 if (exp->X_op == O_secrel)
19154 {
19155 exp->X_op = O_symbol;
19156 type = BFD_RELOC_32_SECREL;
19157 }
19158 #endif
19159
19160 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19161 }
19162
19163 #if defined OBJ_COFF || defined OBJ_ELF
19164 void
19165 arm_validate_fix (fixS * fixP)
19166 {
19167 /* If the destination of the branch is a defined symbol which does not have
19168 the THUMB_FUNC attribute, then we must be calling a function which has
19169 the (interfacearm) attribute. We look for the Thumb entry point to that
19170 function and change the branch to refer to that function instead. */
19171 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19172 && fixP->fx_addsy != NULL
19173 && S_IS_DEFINED (fixP->fx_addsy)
19174 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19175 {
19176 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19177 }
19178 }
19179 #endif
19180
19181 int
19182 arm_force_relocation (struct fix * fixp)
19183 {
19184 #if defined (OBJ_COFF) && defined (TE_PE)
19185 if (fixp->fx_r_type == BFD_RELOC_RVA)
19186 return 1;
19187 #endif
19188
19189 /* Resolve these relocations even if the symbol is extern or weak. */
19190 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19191 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19192 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19193 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19194 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19195 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19196 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19197 return 0;
19198
19199 /* Always leave these relocations for the linker. */
19200 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19201 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19202 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19203 return 1;
19204
19205 /* Always generate relocations against function symbols. */
19206 if (fixp->fx_r_type == BFD_RELOC_32
19207 && fixp->fx_addsy
19208 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19209 return 1;
19210
19211 return generic_force_reloc (fixp);
19212 }
19213
19214 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19215 /* Relocations against function names must be left unadjusted,
19216 so that the linker can use this information to generate interworking
19217 stubs. The MIPS version of this function
19218 also prevents relocations that are mips-16 specific, but I do not
19219 know why it does this.
19220
19221 FIXME:
19222 There is one other problem that ought to be addressed here, but
19223 which currently is not: Taking the address of a label (rather
19224 than a function) and then later jumping to that address. Such
19225 addresses also ought to have their bottom bit set (assuming that
19226 they reside in Thumb code), but at the moment they will not. */
19227
19228 bfd_boolean
19229 arm_fix_adjustable (fixS * fixP)
19230 {
19231 if (fixP->fx_addsy == NULL)
19232 return 1;
19233
19234 /* Preserve relocations against symbols with function type. */
19235 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19236 return 0;
19237
19238 if (THUMB_IS_FUNC (fixP->fx_addsy)
19239 && fixP->fx_subsy == NULL)
19240 return 0;
19241
19242 /* We need the symbol name for the VTABLE entries. */
19243 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19244 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19245 return 0;
19246
19247 /* Don't allow symbols to be discarded on GOT related relocs. */
19248 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19249 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19250 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19251 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19252 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19253 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19254 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19255 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19256 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19257 return 0;
19258
19259 /* Similarly for group relocations. */
19260 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19261 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19262 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19263 return 0;
19264
19265 return 1;
19266 }
19267 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19268
19269 #ifdef OBJ_ELF
19270
19271 const char *
19272 elf32_arm_target_format (void)
19273 {
19274 #ifdef TE_SYMBIAN
19275 return (target_big_endian
19276 ? "elf32-bigarm-symbian"
19277 : "elf32-littlearm-symbian");
19278 #elif defined (TE_VXWORKS)
19279 return (target_big_endian
19280 ? "elf32-bigarm-vxworks"
19281 : "elf32-littlearm-vxworks");
19282 #else
19283 if (target_big_endian)
19284 return "elf32-bigarm";
19285 else
19286 return "elf32-littlearm";
19287 #endif
19288 }
19289
19290 void
19291 armelf_frob_symbol (symbolS * symp,
19292 int * puntp)
19293 {
19294 elf_frob_symbol (symp, puntp);
19295 }
19296 #endif
19297
19298 /* MD interface: Finalization. */
19299
19300 /* A good place to do this, although this was probably not intended
19301 for this kind of use. We need to dump the literal pool before
19302 references are made to a null symbol pointer. */
19303
19304 void
19305 arm_cleanup (void)
19306 {
19307 literal_pool * pool;
19308
19309 for (pool = list_of_pools; pool; pool = pool->next)
19310 {
19311 /* Put it at the end of the relevant section. */
19312 subseg_set (pool->section, pool->sub_section);
19313 #ifdef OBJ_ELF
19314 arm_elf_change_section ();
19315 #endif
19316 s_ltorg (0);
19317 }
19318 }
19319
19320 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19321 ARM ones. */
19322
19323 void
19324 arm_adjust_symtab (void)
19325 {
19326 #ifdef OBJ_COFF
19327 symbolS * sym;
19328
19329 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19330 {
19331 if (ARM_IS_THUMB (sym))
19332 {
19333 if (THUMB_IS_FUNC (sym))
19334 {
19335 /* Mark the symbol as a Thumb function. */
19336 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19337 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19338 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19339
19340 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19341 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19342 else
19343 as_bad (_("%s: unexpected function type: %d"),
19344 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19345 }
19346 else switch (S_GET_STORAGE_CLASS (sym))
19347 {
19348 case C_EXT:
19349 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19350 break;
19351 case C_STAT:
19352 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19353 break;
19354 case C_LABEL:
19355 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19356 break;
19357 default:
19358 /* Do nothing. */
19359 break;
19360 }
19361 }
19362
19363 if (ARM_IS_INTERWORK (sym))
19364 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19365 }
19366 #endif
19367 #ifdef OBJ_ELF
19368 symbolS * sym;
19369 char bind;
19370
19371 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19372 {
19373 if (ARM_IS_THUMB (sym))
19374 {
19375 elf_symbol_type * elf_sym;
19376
19377 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19378 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19379
19380 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19381 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19382 {
19383 /* If it's a .thumb_func, declare it as such;
19384 otherwise tag the label as .code 16. */
19385 if (THUMB_IS_FUNC (sym))
19386 elf_sym->internal_elf_sym.st_info =
19387 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19388 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19389 elf_sym->internal_elf_sym.st_info =
19390 ELF_ST_INFO (bind, STT_ARM_16BIT);
19391 }
19392 }
19393 }
19394 #endif
19395 }
19396
19397 /* MD interface: Initialization. */
19398
19399 static void
19400 set_constant_flonums (void)
19401 {
19402 int i;
19403
19404 for (i = 0; i < NUM_FLOAT_VALS; i++)
19405 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19406 abort ();
19407 }
19408
19409 /* Auto-select Thumb mode if it's the only available instruction set for the
19410 given architecture. */
19411
19412 static void
19413 autoselect_thumb_from_cpu_variant (void)
19414 {
19415 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19416 opcode_select (16);
19417 }
19418
19419 void
19420 md_begin (void)
19421 {
19422 unsigned mach;
19423 unsigned int i;
19424
19425 if ( (arm_ops_hsh = hash_new ()) == NULL
19426 || (arm_cond_hsh = hash_new ()) == NULL
19427 || (arm_shift_hsh = hash_new ()) == NULL
19428 || (arm_psr_hsh = hash_new ()) == NULL
19429 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19430 || (arm_reg_hsh = hash_new ()) == NULL
19431 || (arm_reloc_hsh = hash_new ()) == NULL
19432 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19433 as_fatal (_("virtual memory exhausted"));
19434
19435 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19436 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19437 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19438 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19439 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19440 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19441 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19442 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19443 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19444 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19445 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19446 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19447 for (i = 0;
19448 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19449 i++)
19450 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19451 (PTR) (barrier_opt_names + i));
19452 #ifdef OBJ_ELF
19453 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19454 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19455 #endif
19456
19457 set_constant_flonums ();
19458
19459 /* Set the cpu variant based on the command-line options. We prefer
19460 -mcpu= over -march= if both are set (as for GCC); and we prefer
19461 -mfpu= over any other way of setting the floating point unit.
19462 Use of legacy options together with new options is faulted. */
19463 if (legacy_cpu)
19464 {
19465 if (mcpu_cpu_opt || march_cpu_opt)
19466 as_bad (_("use of old and new-style options to set CPU type"));
19467
19468 mcpu_cpu_opt = legacy_cpu;
19469 }
19470 else if (!mcpu_cpu_opt)
19471 mcpu_cpu_opt = march_cpu_opt;
19472
19473 if (legacy_fpu)
19474 {
19475 if (mfpu_opt)
19476 as_bad (_("use of old and new-style options to set FPU type"));
19477
19478 mfpu_opt = legacy_fpu;
19479 }
19480 else if (!mfpu_opt)
19481 {
19482 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19483 /* Some environments specify a default FPU. If they don't, infer it
19484 from the processor. */
19485 if (mcpu_fpu_opt)
19486 mfpu_opt = mcpu_fpu_opt;
19487 else
19488 mfpu_opt = march_fpu_opt;
19489 #else
19490 mfpu_opt = &fpu_default;
19491 #endif
19492 }
19493
19494 if (!mfpu_opt)
19495 {
19496 if (mcpu_cpu_opt != NULL)
19497 mfpu_opt = &fpu_default;
19498 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19499 mfpu_opt = &fpu_arch_vfp_v2;
19500 else
19501 mfpu_opt = &fpu_arch_fpa;
19502 }
19503
19504 #ifdef CPU_DEFAULT
19505 if (!mcpu_cpu_opt)
19506 {
19507 mcpu_cpu_opt = &cpu_default;
19508 selected_cpu = cpu_default;
19509 }
19510 #else
19511 if (mcpu_cpu_opt)
19512 selected_cpu = *mcpu_cpu_opt;
19513 else
19514 mcpu_cpu_opt = &arm_arch_any;
19515 #endif
19516
19517 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19518
19519 autoselect_thumb_from_cpu_variant ();
19520
19521 arm_arch_used = thumb_arch_used = arm_arch_none;
19522
19523 #if defined OBJ_COFF || defined OBJ_ELF
19524 {
19525 unsigned int flags = 0;
19526
19527 #if defined OBJ_ELF
19528 flags = meabi_flags;
19529
19530 switch (meabi_flags)
19531 {
19532 case EF_ARM_EABI_UNKNOWN:
19533 #endif
19534 /* Set the flags in the private structure. */
19535 if (uses_apcs_26) flags |= F_APCS26;
19536 if (support_interwork) flags |= F_INTERWORK;
19537 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19538 if (pic_code) flags |= F_PIC;
19539 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19540 flags |= F_SOFT_FLOAT;
19541
19542 switch (mfloat_abi_opt)
19543 {
19544 case ARM_FLOAT_ABI_SOFT:
19545 case ARM_FLOAT_ABI_SOFTFP:
19546 flags |= F_SOFT_FLOAT;
19547 break;
19548
19549 case ARM_FLOAT_ABI_HARD:
19550 if (flags & F_SOFT_FLOAT)
19551 as_bad (_("hard-float conflicts with specified fpu"));
19552 break;
19553 }
19554
19555 /* Using pure-endian doubles (even if soft-float). */
19556 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19557 flags |= F_VFP_FLOAT;
19558
19559 #if defined OBJ_ELF
19560 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19561 flags |= EF_ARM_MAVERICK_FLOAT;
19562 break;
19563
19564 case EF_ARM_EABI_VER4:
19565 case EF_ARM_EABI_VER5:
19566 /* No additional flags to set. */
19567 break;
19568
19569 default:
19570 abort ();
19571 }
19572 #endif
19573 bfd_set_private_flags (stdoutput, flags);
19574
19575 /* We have run out of flags in the COFF header to encode the
19576 status of ATPCS support, so instead we create a dummy,
19577 empty debug section called .arm.atpcs. */
19578 if (atpcs)
19579 {
19580 asection * sec;
19581
19582 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19583
19584 if (sec != NULL)
19585 {
19586 bfd_set_section_flags
19587 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19588 bfd_set_section_size (stdoutput, sec, 0);
19589 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19590 }
19591 }
19592 }
19593 #endif
19594
19595 /* Record the CPU type as well. */
19596 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19597 mach = bfd_mach_arm_iWMMXt2;
19598 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19599 mach = bfd_mach_arm_iWMMXt;
19600 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19601 mach = bfd_mach_arm_XScale;
19602 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19603 mach = bfd_mach_arm_ep9312;
19604 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19605 mach = bfd_mach_arm_5TE;
19606 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19607 {
19608 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19609 mach = bfd_mach_arm_5T;
19610 else
19611 mach = bfd_mach_arm_5;
19612 }
19613 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19614 {
19615 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19616 mach = bfd_mach_arm_4T;
19617 else
19618 mach = bfd_mach_arm_4;
19619 }
19620 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19621 mach = bfd_mach_arm_3M;
19622 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19623 mach = bfd_mach_arm_3;
19624 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19625 mach = bfd_mach_arm_2a;
19626 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19627 mach = bfd_mach_arm_2;
19628 else
19629 mach = bfd_mach_arm_unknown;
19630
19631 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19632 }
19633
19634 /* Command line processing. */
19635
19636 /* md_parse_option
19637 Invocation line includes a switch not recognized by the base assembler.
19638 See if it's a processor-specific option.
19639
19640 This routine is somewhat complicated by the need for backwards
19641 compatibility (since older releases of gcc can't be changed).
19642 The new options try to make the interface as compatible as
19643 possible with GCC.
19644
19645 New options (supported) are:
19646
19647 -mcpu=<cpu name> Assemble for selected processor
19648 -march=<architecture name> Assemble for selected architecture
19649 -mfpu=<fpu architecture> Assemble for selected FPU.
19650 -EB/-mbig-endian Big-endian
19651 -EL/-mlittle-endian Little-endian
19652 -k Generate PIC code
19653 -mthumb Start in Thumb mode
19654 -mthumb-interwork Code supports ARM/Thumb interworking
19655
19656 For now we will also provide support for:
19657
19658 -mapcs-32 32-bit Program counter
19659 -mapcs-26 26-bit Program counter
19660 -mapcs-float Floats passed in FP registers
19661 -mapcs-reentrant Reentrant code
19662 -matpcs
19663 (at some point these will probably be replaced with -mapcs=<list of options>
19664 and -matpcs=<list of options>)
19665 
19666 The remaining options are only supported for backwards compatibility.
19667 CPU variants (the arm part is optional):
19668 -m[arm]1 Currently not supported.
19669 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19670 -m[arm]3 Arm 3 processor
19671 -m[arm]6[xx], Arm 6 processors
19672 -m[arm]7[xx][t][[d]m] Arm 7 processors
19673 -m[arm]8[10] Arm 8 processors
19674 -m[arm]9[20][tdmi] Arm 9 processors
19675 -mstrongarm[110[0]] StrongARM processors
19676 -mxscale XScale processors
19677 -m[arm]v[2345[t[e]]] Arm architectures
19678 -mall All (except the ARM1)
19679 FP variants:
19680 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19681 -mfpe-old (No float load/store multiples)
19682 -mvfpxd VFP Single precision
19683 -mvfp All VFP
19684 -mno-fpu Disable all floating point instructions
19685
19686 The following CPU names are recognized:
19687 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19688 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19689 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19690 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19691 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19692 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19693 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19694
19695 */
19696
19697 const char * md_shortopts = "m:k";
19698
19699 #ifdef ARM_BI_ENDIAN
19700 #define OPTION_EB (OPTION_MD_BASE + 0)
19701 #define OPTION_EL (OPTION_MD_BASE + 1)
19702 #else
19703 #if TARGET_BYTES_BIG_ENDIAN
19704 #define OPTION_EB (OPTION_MD_BASE + 0)
19705 #else
19706 #define OPTION_EL (OPTION_MD_BASE + 1)
19707 #endif
19708 #endif
19709
19710 struct option md_longopts[] =
19711 {
19712 #ifdef OPTION_EB
19713 {"EB", no_argument, NULL, OPTION_EB},
19714 #endif
19715 #ifdef OPTION_EL
19716 {"EL", no_argument, NULL, OPTION_EL},
19717 #endif
19718 {NULL, no_argument, NULL, 0}
19719 };
19720
19721 size_t md_longopts_size = sizeof (md_longopts);
19722
19723 struct arm_option_table
19724 {
19725 char *option; /* Option name to match. */
19726 char *help; /* Help information. */
19727 int *var; /* Variable to change. */
19728 int value; /* What to change it to. */
19729 char *deprecated; /* If non-null, print this message. */
19730 };
19731
19732 struct arm_option_table arm_opts[] =
19733 {
19734 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19735 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19736 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19737 &support_interwork, 1, NULL},
19738 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19739 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19740 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19741 1, NULL},
19742 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19743 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19744 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19745 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19746 NULL},
19747
19748 /* These are recognized by the assembler, but have no effect on code. */
19749 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19750 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19751 {NULL, NULL, NULL, 0, NULL}
19752 };
19753
19754 struct arm_legacy_option_table
19755 {
19756 char *option; /* Option name to match. */
19757 const arm_feature_set **var; /* Variable to change. */
19758 const arm_feature_set value; /* What to change it to. */
19759 char *deprecated; /* If non-null, print this message. */
19760 };
19761
19762 const struct arm_legacy_option_table arm_legacy_opts[] =
19763 {
19764 /* DON'T add any new processors to this list -- we want the whole list
19765 to go away... Add them to the processors table instead. */
19766 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19767 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19768 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19769 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19770 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19771 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19772 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19773 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19774 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19775 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19776 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19777 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19778 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19779 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19780 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19781 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19782 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19783 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19784 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19785 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19786 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19787 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19788 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19789 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19790 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19791 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19792 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19793 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19794 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19795 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19796 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19797 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19798 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19799 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19800 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19801 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19802 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19803 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19804 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19805 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19806 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19807 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19808 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19809 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19810 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19811 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19812 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19813 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19814 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19815 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19816 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19817 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19818 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19819 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19820 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19821 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19822 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19823 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19824 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19825 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19826 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19827 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19828 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19829 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19830 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19831 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19832 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19833 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19834 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19835 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19836 N_("use -mcpu=strongarm110")},
19837 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19838 N_("use -mcpu=strongarm1100")},
19839 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19840 N_("use -mcpu=strongarm1110")},
19841 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19842 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19843 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19844
19845 /* Architecture variants -- don't add any more to this list either. */
19846 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19847 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19848 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19849 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19850 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19851 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19852 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19853 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19854 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19855 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19856 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19857 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19858 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19859 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19860 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19861 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19862 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19863 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19864
19865 /* Floating point variants -- don't add any more to this list either. */
19866 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19867 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19868 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19869 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19870 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19871
19872 {NULL, NULL, ARM_ARCH_NONE, NULL}
19873 };
19874
19875 struct arm_cpu_option_table
19876 {
19877 char *name;
19878 const arm_feature_set value;
19879 /* For some CPUs we assume an FPU unless the user explicitly sets
19880 -mfpu=... */
19881 const arm_feature_set default_fpu;
19882 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19883 case. */
19884 const char *canonical_name;
19885 };
19886
19887 /* This list should, at a minimum, contain all the cpu names
19888 recognized by GCC. */
19889 static const struct arm_cpu_option_table arm_cpus[] =
19890 {
19891 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19892 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19893 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19894 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19895 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19896 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19897 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19898 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19899 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19900 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19901 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19902 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19903 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19904 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19905 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19906 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19907 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19908 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19909 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19910 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19911 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19912 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19913 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19914 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19915 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19916 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19917 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19918 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19919 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19920 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19921 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19922 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19923 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19924 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19925 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19926 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19927 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19928 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19929 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19930 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19931 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19932 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19933 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19934 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19935 /* For V5 or later processors we default to using VFP; but the user
19936 should really set the FPU type explicitly. */
19937 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19938 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19939 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19940 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19941 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19942 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19943 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19944 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19945 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19946 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19947 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19948 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19949 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19950 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19951 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19952 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19953 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19954 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19955 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19956 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19957 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19958 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19959 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19960 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19961 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19962 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19963 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19964 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19965 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19966 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19967 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19968 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19969 | FPU_NEON_EXT_V1),
19970 NULL},
19971 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19972 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19973 /* ??? XSCALE is really an architecture. */
19974 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19975 /* ??? iwmmxt is not a processor. */
19976 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19977 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
19978 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19979 /* Maverick */
19980 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19981 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19982 };
19983
19984 struct arm_arch_option_table
19985 {
19986 char *name;
19987 const arm_feature_set value;
19988 const arm_feature_set default_fpu;
19989 };
19990
19991 /* This list should, at a minimum, contain all the architecture names
19992 recognized by GCC. */
19993 static const struct arm_arch_option_table arm_archs[] =
19994 {
19995 {"all", ARM_ANY, FPU_ARCH_FPA},
19996 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19997 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19998 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19999 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
20000 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
20001 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
20002 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
20003 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
20004 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
20005 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
20006 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
20007 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
20008 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
20009 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
20010 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
20011 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
20012 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
20013 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
20014 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
20015 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
20016 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
20017 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
20018 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
20019 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
20020 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
20021 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
20022 /* The official spelling of the ARMv7 profile variants is the dashed form.
20023 Accept the non-dashed form for compatibility with old toolchains. */
20024 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20025 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20026 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20027 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20028 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20029 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20030 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
20031 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
20032 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
20033 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
20034 };
20035
20036 /* ISA extensions in the co-processor space. */
20037 struct arm_option_cpu_value_table
20038 {
20039 char *name;
20040 const arm_feature_set value;
20041 };
20042
20043 static const struct arm_option_cpu_value_table arm_extensions[] =
20044 {
20045 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20046 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20047 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20048 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20049 {NULL, ARM_ARCH_NONE}
20050 };
20051
20052 /* This list should, at a minimum, contain all the fpu names
20053 recognized by GCC. */
20054 static const struct arm_option_cpu_value_table arm_fpus[] =
20055 {
20056 {"softfpa", FPU_NONE},
20057 {"fpe", FPU_ARCH_FPE},
20058 {"fpe2", FPU_ARCH_FPE},
20059 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20060 {"fpa", FPU_ARCH_FPA},
20061 {"fpa10", FPU_ARCH_FPA},
20062 {"fpa11", FPU_ARCH_FPA},
20063 {"arm7500fe", FPU_ARCH_FPA},
20064 {"softvfp", FPU_ARCH_VFP},
20065 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20066 {"vfp", FPU_ARCH_VFP_V2},
20067 {"vfp9", FPU_ARCH_VFP_V2},
20068 {"vfp3", FPU_ARCH_VFP_V3},
20069 {"vfp10", FPU_ARCH_VFP_V2},
20070 {"vfp10-r0", FPU_ARCH_VFP_V1},
20071 {"vfpxd", FPU_ARCH_VFP_V1xD},
20072 {"arm1020t", FPU_ARCH_VFP_V1},
20073 {"arm1020e", FPU_ARCH_VFP_V2},
20074 {"arm1136jfs", FPU_ARCH_VFP_V2},
20075 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20076 {"maverick", FPU_ARCH_MAVERICK},
20077 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20078 {NULL, ARM_ARCH_NONE}
20079 };
20080
20081 struct arm_option_value_table
20082 {
20083 char *name;
20084 long value;
20085 };
20086
20087 static const struct arm_option_value_table arm_float_abis[] =
20088 {
20089 {"hard", ARM_FLOAT_ABI_HARD},
20090 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20091 {"soft", ARM_FLOAT_ABI_SOFT},
20092 {NULL, 0}
20093 };
20094
20095 #ifdef OBJ_ELF
20096 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20097 static const struct arm_option_value_table arm_eabis[] =
20098 {
20099 {"gnu", EF_ARM_EABI_UNKNOWN},
20100 {"4", EF_ARM_EABI_VER4},
20101 {"5", EF_ARM_EABI_VER5},
20102 {NULL, 0}
20103 };
20104 #endif
20105
20106 struct arm_long_option_table
20107 {
20108 char * option; /* Substring to match. */
20109 char * help; /* Help information. */
20110 int (* func) (char * subopt); /* Function to decode sub-option. */
20111 char * deprecated; /* If non-null, print this message. */
20112 };
20113
20114 static int
20115 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20116 {
20117 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20118
20119 /* Copy the feature set, so that we can modify it. */
20120 *ext_set = **opt_p;
20121 *opt_p = ext_set;
20122
20123 while (str != NULL && *str != 0)
20124 {
20125 const struct arm_option_cpu_value_table * opt;
20126 char * ext;
20127 int optlen;
20128
20129 if (*str != '+')
20130 {
20131 as_bad (_("invalid architectural extension"));
20132 return 0;
20133 }
20134
20135 str++;
20136 ext = strchr (str, '+');
20137
20138 if (ext != NULL)
20139 optlen = ext - str;
20140 else
20141 optlen = strlen (str);
20142
20143 if (optlen == 0)
20144 {
20145 as_bad (_("missing architectural extension"));
20146 return 0;
20147 }
20148
20149 for (opt = arm_extensions; opt->name != NULL; opt++)
20150 if (strncmp (opt->name, str, optlen) == 0)
20151 {
20152 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20153 break;
20154 }
20155
20156 if (opt->name == NULL)
20157 {
20158           as_bad (_("unknown architectural extension `%s'"), str);
20159 return 0;
20160 }
20161
20162 str = ext;
20163     }
20164
20165 return 1;
20166 }
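
/* For example, "-mcpu=xscale+iwmmxt" reaches this parser with str pointing
   at "+iwmmxt": the leading '+' is consumed, "iwmmxt" is looked up in
   arm_extensions, and ARM_CEXT_IWMMXT is merged into the heap-allocated
   copy of the base feature set that now replaces *opt_p.  Several "+ext"
   suffixes may be chained in this way.  */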
20167
20168 static int
20169 arm_parse_cpu (char * str)
20170 {
20171 const struct arm_cpu_option_table * opt;
20172 char * ext = strchr (str, '+');
20173 int optlen;
20174
20175 if (ext != NULL)
20176 optlen = ext - str;
20177 else
20178 optlen = strlen (str);
20179
20180 if (optlen == 0)
20181 {
20182 as_bad (_("missing cpu name `%s'"), str);
20183 return 0;
20184 }
20185
20186 for (opt = arm_cpus; opt->name != NULL; opt++)
20187     if (strncmp (opt->name, str, optlen) == 0 && opt->name[optlen] == '\0')
20188 {
20189 mcpu_cpu_opt = &opt->value;
20190 mcpu_fpu_opt = &opt->default_fpu;
20191 if (opt->canonical_name)
20192           strcpy (selected_cpu_name, opt->canonical_name);
20193 else
20194 {
20195 int i;
20196 for (i = 0; i < optlen; i++)
20197 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20198 selected_cpu_name[i] = 0;
20199 }
20200
20201 if (ext != NULL)
20202 return arm_parse_extension (ext, &mcpu_cpu_opt);
20203
20204 return 1;
20205 }
20206
20207 as_bad (_("unknown cpu `%s'"), str);
20208 return 0;
20209 }
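
/* As an illustration, "-mcpu=arm926ej-s" matches the arm_cpus entry above,
   so mcpu_fpu_opt defaults to FPU_ARCH_VFP_V2 and, since that entry has no
   canonical name, the upper-cased name "ARM926EJ-S" is recorded in
   selected_cpu_name.  Any "+ext" suffix is handed on to
   arm_parse_extension.  */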
20210
20211 static int
20212 arm_parse_arch (char * str)
20213 {
20214 const struct arm_arch_option_table *opt;
20215 char *ext = strchr (str, '+');
20216 int optlen;
20217
20218 if (ext != NULL)
20219 optlen = ext - str;
20220 else
20221 optlen = strlen (str);
20222
20223 if (optlen == 0)
20224 {
20225 as_bad (_("missing architecture name `%s'"), str);
20226 return 0;
20227 }
20228
20229 for (opt = arm_archs; opt->name != NULL; opt++)
20230     if (strncmp (opt->name, str, optlen) == 0 && opt->name[optlen] == '\0')
20231 {
20232 march_cpu_opt = &opt->value;
20233 march_fpu_opt = &opt->default_fpu;
20234         strcpy (selected_cpu_name, opt->name);
20235
20236 if (ext != NULL)
20237 return arm_parse_extension (ext, &march_cpu_opt);
20238
20239 return 1;
20240 }
20241
20242 as_bad (_("unknown architecture `%s'\n"), str);
20243 return 0;
20244 }
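
/* The architecture name must match an arm_archs entry exactly up to any
   '+'; as with -mcpu, a suffix such as "+iwmmxt" is then passed on to
   arm_parse_extension.  */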
20245
20246 static int
20247 arm_parse_fpu (char * str)
20248 {
20249 const struct arm_option_cpu_value_table * opt;
20250
20251 for (opt = arm_fpus; opt->name != NULL; opt++)
20252 if (streq (opt->name, str))
20253 {
20254 mfpu_opt = &opt->value;
20255 return 1;
20256 }
20257
20258 as_bad (_("unknown floating point format `%s'\n"), str);
20259 return 0;
20260 }
20261
20262 static int
20263 arm_parse_float_abi (char * str)
20264 {
20265 const struct arm_option_value_table * opt;
20266
20267 for (opt = arm_float_abis; opt->name != NULL; opt++)
20268 if (streq (opt->name, str))
20269 {
20270 mfloat_abi_opt = opt->value;
20271 return 1;
20272 }
20273
20274 as_bad (_("unknown floating point abi `%s'\n"), str);
20275 return 0;
20276 }
20277
20278 #ifdef OBJ_ELF
20279 static int
20280 arm_parse_eabi (char * str)
20281 {
20282 const struct arm_option_value_table *opt;
20283
20284 for (opt = arm_eabis; opt->name != NULL; opt++)
20285 if (streq (opt->name, str))
20286 {
20287 meabi_flags = opt->value;
20288 return 1;
20289 }
20290 as_bad (_("unknown EABI `%s'\n"), str);
20291 return 0;
20292 }
20293 #endif
20294
20295 struct arm_long_option_table arm_long_opts[] =
20296 {
20297 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20298 arm_parse_cpu, NULL},
20299 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20300 arm_parse_arch, NULL},
20301 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20302 arm_parse_fpu, NULL},
20303 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20304 arm_parse_float_abi, NULL},
20305 #ifdef OBJ_ELF
20306 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20307 arm_parse_eabi, NULL},
20308 #endif
20309 {NULL, NULL, 0, NULL}
20310 };
20311
20312 int
20313 md_parse_option (int c, char * arg)
20314 {
20315 struct arm_option_table *opt;
20316 const struct arm_legacy_option_table *fopt;
20317 struct arm_long_option_table *lopt;
20318
20319 switch (c)
20320 {
20321 #ifdef OPTION_EB
20322 case OPTION_EB:
20323 target_big_endian = 1;
20324 break;
20325 #endif
20326
20327 #ifdef OPTION_EL
20328 case OPTION_EL:
20329 target_big_endian = 0;
20330 break;
20331 #endif
20332
20333 case 'a':
20334       /* Listing option.  Just ignore these; we don't support additional
20335          ones. */
20336 return 0;
20337
20338 default:
20339 for (opt = arm_opts; opt->option != NULL; opt++)
20340 {
20341 if (c == opt->option[0]
20342 && ((arg == NULL && opt->option[1] == 0)
20343 || streq (arg, opt->option + 1)))
20344 {
20345 #if WARN_DEPRECATED
20346 /* If the option is deprecated, tell the user. */
20347 if (opt->deprecated != NULL)
20348 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20349 arg ? arg : "", _(opt->deprecated));
20350 #endif
20351
20352 if (opt->var != NULL)
20353 *opt->var = opt->value;
20354
20355 return 1;
20356 }
20357 }
20358
20359 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20360 {
20361 if (c == fopt->option[0]
20362 && ((arg == NULL && fopt->option[1] == 0)
20363 || streq (arg, fopt->option + 1)))
20364 {
20365 #if WARN_DEPRECATED
20366 /* If the option is deprecated, tell the user. */
20367 if (fopt->deprecated != NULL)
20368 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20369 arg ? arg : "", _(fopt->deprecated));
20370 #endif
20371
20372 if (fopt->var != NULL)
20373 *fopt->var = &fopt->value;
20374
20375 return 1;
20376 }
20377 }
20378
20379 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20380 {
20381 /* These options are expected to have an argument. */
20382 if (c == lopt->option[0]
20383 && arg != NULL
20384 && strncmp (arg, lopt->option + 1,
20385 strlen (lopt->option + 1)) == 0)
20386 {
20387 #if WARN_DEPRECATED
20388 /* If the option is deprecated, tell the user. */
20389 if (lopt->deprecated != NULL)
20390 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20391 _(lopt->deprecated));
20392 #endif
20393
20394               /* Call the sub-option parser. */
20395 return lopt->func (arg + strlen (lopt->option) - 1);
20396 }
20397 }
20398
20399 return 0;
20400 }
20401
20402 return 1;
20403 }
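
/* A sketch of the dispatch above, using entries from the tables in this
   file: "-mbig-endian" is found in arm_opts and simply sets
   target_big_endian; "-m7tdmi" is found in arm_legacy_opts, points
   legacy_cpu at the ARM_ARCH_V4T feature set and prints the
   "use -mcpu=arm7tdmi" deprecation note; "-mfpu=vfp3" matches the "mfpu="
   prefix in arm_long_opts, so arm_parse_fpu is called with "vfp3".  */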
20404
20405 void
20406 md_show_usage (FILE * fp)
20407 {
20408 struct arm_option_table *opt;
20409 struct arm_long_option_table *lopt;
20410
20411 fprintf (fp, _(" ARM-specific assembler options:\n"));
20412
20413 for (opt = arm_opts; opt->option != NULL; opt++)
20414 if (opt->help != NULL)
20415 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20416
20417 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20418 if (lopt->help != NULL)
20419 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20420
20421 #ifdef OPTION_EB
20422 fprintf (fp, _("\
20423 -EB assemble code for a big-endian cpu\n"));
20424 #endif
20425
20426 #ifdef OPTION_EL
20427 fprintf (fp, _("\
20428 -EL assemble code for a little-endian cpu\n"));
20429 #endif
20430 }
20431
20432
20433 #ifdef OBJ_ELF
20434 typedef struct
20435 {
20436 int val;
20437 arm_feature_set flags;
20438 } cpu_arch_ver_table;
20439
20440 /* Mapping from CPU features to EABI CPU arch values.  The table must be
20441    sorted with the least capable entries (fewest features) first. */
20442 static const cpu_arch_ver_table cpu_arch_ver[] =
20443 {
20444 {1, ARM_ARCH_V4},
20445 {2, ARM_ARCH_V4T},
20446 {3, ARM_ARCH_V5},
20447 {4, ARM_ARCH_V5TE},
20448 {5, ARM_ARCH_V5TEJ},
20449 {6, ARM_ARCH_V6},
20450 {7, ARM_ARCH_V6Z},
20451 {8, ARM_ARCH_V6K},
20452 {9, ARM_ARCH_V6T2},
20453 {10, ARM_ARCH_V7A},
20454 {10, ARM_ARCH_V7R},
20455 {10, ARM_ARCH_V7M},
20456 {0, ARM_ARCH_NONE}
20457 };
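
/* aeabi_set_public_attributes below walks this table in order, recording
   an entry whenever it still overlaps the remaining feature bits and then
   clearing those bits, so the most featureful matching architecture ends
   up as the Tag_CPU_arch value.  The three v7 profiles share the value 10;
   the profile letter is emitted separately as Tag_CPU_arch_profile.  */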
20458
20459 /* Set the public EABI object attributes. */
20460 static void
20461 aeabi_set_public_attributes (void)
20462 {
20463 int arch;
20464 arm_feature_set flags;
20465 arm_feature_set tmp;
20466 const cpu_arch_ver_table *p;
20467
20468 /* Choose the architecture based on the capabilities of the requested cpu
20469 (if any) and/or the instructions actually used. */
20470 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20471 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20472 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20473   /* Allow the user to override the reported architecture. */
20474 if (object_arch)
20475 {
20476 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20477 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20478 }
20479
20480 tmp = flags;
20481 arch = 0;
20482 for (p = cpu_arch_ver; p->val; p++)
20483 {
20484 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20485 {
20486 arch = p->val;
20487 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20488 }
20489 }
20490
20491 /* Tag_CPU_name. */
20492 if (selected_cpu_name[0])
20493 {
20494 char *p;
20495
20496 p = selected_cpu_name;
20497       if (strncmp (p, "armv", 4) == 0)
20498 {
20499 int i;
20500
20501 p += 4;
20502 for (i = 0; p[i]; i++)
20503 p[i] = TOUPPER (p[i]);
20504 }
20505 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20506 }
20507 /* Tag_CPU_arch. */
20508 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20509 /* Tag_CPU_arch_profile. */
20510 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20511 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20512 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20513 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20514 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20515 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20516 /* Tag_ARM_ISA_use. */
20517 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20518 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20519 /* Tag_THUMB_ISA_use. */
20520 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20521 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20522 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20523 /* Tag_VFP_arch. */
20524 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20525 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20526 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20527 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20528 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20529 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20530 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20531 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20532 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20533 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20534 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20535 /* Tag_WMMX_arch. */
20536 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20537 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20538 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20539 /* Tag_NEON_arch. */
20540 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20541 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20542 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20543 }
20544
20545 /* Add the .ARM.attributes section. */
20546 void
20547 arm_md_end (void)
20548 {
20549 segT s;
20550 char *p;
20551 addressT addr;
20552 offsetT size;
20553
20554 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20555 return;
20556
20557 aeabi_set_public_attributes ();
20558 size = elf32_arm_eabi_attr_size (stdoutput);
20559 s = subseg_new (".ARM.attributes", 0);
20560 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20561 addr = frag_now_fix ();
20562 p = frag_more (size);
20563   elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *) p, size);
20564 }
20565 #endif /* OBJ_ELF */
20566
20567
20568 /* Parse a .cpu directive. */
20569
20570 static void
20571 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20572 {
20573 const struct arm_cpu_option_table *opt;
20574 char *name;
20575 char saved_char;
20576
20577 name = input_line_pointer;
20578   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20579 input_line_pointer++;
20580 saved_char = *input_line_pointer;
20581 *input_line_pointer = 0;
20582
20583 /* Skip the first "all" entry. */
20584 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20585 if (streq (opt->name, name))
20586 {
20587 mcpu_cpu_opt = &opt->value;
20588 selected_cpu = opt->value;
20589 if (opt->canonical_name)
20590           strcpy (selected_cpu_name, opt->canonical_name);
20591 else
20592 {
20593 int i;
20594 for (i = 0; opt->name[i]; i++)
20595 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20596 selected_cpu_name[i] = 0;
20597 }
20598 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20599 *input_line_pointer = saved_char;
20600 demand_empty_rest_of_line ();
20601 return;
20602 }
20603 as_bad (_("unknown cpu `%s'"), name);
20604 *input_line_pointer = saved_char;
20605 ignore_rest_of_line ();
20606 }
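
/* For example, a source file containing ".cpu arm7tdmi" selects the
   arm7tdmi entry from arm_cpus, records "ARM7TDMI" in selected_cpu_name
   and recomputes cpu_variant.  Unlike -mcpu=, the name must match in full
   and no "+extension" suffix is accepted.  */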
20607
20608
20609 /* Parse a .arch directive. */
20610
20611 static void
20612 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20613 {
20614 const struct arm_arch_option_table *opt;
20615 char saved_char;
20616 char *name;
20617
20618 name = input_line_pointer;
20619   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20620 input_line_pointer++;
20621 saved_char = *input_line_pointer;
20622 *input_line_pointer = 0;
20623
20624 /* Skip the first "all" entry. */
20625 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20626 if (streq (opt->name, name))
20627 {
20628 mcpu_cpu_opt = &opt->value;
20629 selected_cpu = opt->value;
20630         strcpy (selected_cpu_name, opt->name);
20631 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20632 *input_line_pointer = saved_char;
20633 demand_empty_rest_of_line ();
20634 return;
20635 }
20636
20637 as_bad (_("unknown architecture `%s'\n"), name);
20638 *input_line_pointer = saved_char;
20639 ignore_rest_of_line ();
20640 }
20641
20642
20643 /* Parse a .object_arch directive. */
20644
20645 static void
20646 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20647 {
20648 const struct arm_arch_option_table *opt;
20649 char saved_char;
20650 char *name;
20651
20652 name = input_line_pointer;
20653   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20654 input_line_pointer++;
20655 saved_char = *input_line_pointer;
20656 *input_line_pointer = 0;
20657
20658 /* Skip the first "all" entry. */
20659 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20660 if (streq (opt->name, name))
20661 {
20662 object_arch = &opt->value;
20663 *input_line_pointer = saved_char;
20664 demand_empty_rest_of_line ();
20665 return;
20666 }
20667
20668 as_bad (_("unknown architecture `%s'\n"), name);
20669 *input_line_pointer = saved_char;
20670 ignore_rest_of_line ();
20671 }
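
/* .object_arch only changes object_arch, which aeabi_set_public_attributes
   uses to override the architecture recorded in the EABI attributes; it
   does not alter cpu_variant, so the set of instructions the assembler
   accepts is unchanged.  */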
20672
20673
20674 /* Parse a .fpu directive. */
20675
20676 static void
20677 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20678 {
20679 const struct arm_option_cpu_value_table *opt;
20680 char saved_char;
20681 char *name;
20682
20683 name = input_line_pointer;
20684   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20685 input_line_pointer++;
20686 saved_char = *input_line_pointer;
20687 *input_line_pointer = 0;
20688
20689 for (opt = arm_fpus; opt->name != NULL; opt++)
20690 if (streq (opt->name, name))
20691 {
20692 mfpu_opt = &opt->value;
20693 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20694 *input_line_pointer = saved_char;
20695 demand_empty_rest_of_line ();
20696 return;
20697 }
20698
20699 as_bad (_("unknown floating point format `%s'\n"), name);
20700 *input_line_pointer = saved_char;
20701 ignore_rest_of_line ();
20702 }
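
/* For example, ".fpu vfp3" points mfpu_opt at the FPU_ARCH_VFP_V3 entry in
   arm_fpus and folds it into cpu_variant, much as -mfpu=vfp3 does from the
   command line.  */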
20703
20704 /* Copy symbol information. */
20705 void
20706 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
20707 {
20708 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
20709 }