1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
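
/* For example, a typical EHABI-annotated function uses the unwinding
   directives handled later in this file:

       .fnstart
       .save {r4, lr}          @ registers saved on function entry
       .setfp fp, sp, #0       @ records fp_reg/fp_offset and sets fp_used
       ...
       .fnend

   and the fields above accumulate that state between .fnstart and .fnend.  */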
81
82 /* Bit N, if set, indicates that an R_ARM_NONE relocation has already been
83 output for __aeabi_unwind_cpp_prN. This enables the dependency to be
84 emitted only once per section, saving unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
91 typedef enum
92 {
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
224 static int mfloat_abi_opt = -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name[16];
229 #ifdef OBJ_ELF
230 # ifdef EABI_DEFAULT
231 static int meabi_flags = EABI_DEFAULT;
232 # else
233 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
234 # endif
235
236 bfd_boolean
237 arm_is_eabi (void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
276
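/* For example, under unified syntax the Thumb instruction

       adds  r0, r0, 1    @ flag-setting explicit via the 's' affix,
                          @ '#' before the immediate is optional

   replaces the old divided-syntax form "add r0, r0, #1", which set the
   flags implicitly.  */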
277 static bfd_boolean unified_syntax = FALSE;
278
279 enum neon_el_type
280 {
281 NT_invtype,
282 NT_untyped,
283 NT_integer,
284 NT_float,
285 NT_poly,
286 NT_signed,
287 NT_unsigned
288 };
289
290 struct neon_type_el
291 {
292 enum neon_el_type type;
293 unsigned size;
294 };
295
296 #define NEON_MAX_TYPE_ELS 4
297
298 struct neon_type
299 {
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
301 unsigned elems;
302 };
303
304 struct arm_it
305 {
306 const char * error;
307 unsigned long instruction;
308 int size;
309 int size_req;
310 int cond;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
313 appropriate. */
314 int uncond_value;
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
318 unsigned long relax;
319 struct
320 {
321 bfd_reloc_code_real_type type;
322 expressionS exp;
323 int pc_rel;
324 } reloc;
325
326 struct
327 {
328 unsigned reg;
329 signed int imm;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
337 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
338 instructions. This allows us to disambiguate ARM <-> vector insns. */
339 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
340 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
341 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
342 unsigned issingle : 1; /* Operand is VFP single-precision register. */
343 unsigned hasreloc : 1; /* Operand has relocation suffix. */
344 unsigned writeback : 1; /* Operand has trailing ! */
345 unsigned preind : 1; /* Preindexed address. */
346 unsigned postind : 1; /* Postindexed address. */
347 unsigned negative : 1; /* Index register was negated. */
348 unsigned shifted : 1; /* Shift applied to operation. */
349 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
350 } operands[6];
351 };
352
353 static struct arm_it inst;
354
355 #define NUM_FLOAT_VALS 8
356
357 const char * fp_const[] =
358 {
359 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
360 };
361
362 /* Number of littlenums required to hold an extended precision number. */
363 #define MAX_LITTLENUMS 6
364
365 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
366
367 #define FAIL (-1)
368 #define SUCCESS (0)
369
370 #define SUFF_S 1
371 #define SUFF_D 2
372 #define SUFF_E 3
373 #define SUFF_P 4
374
375 #define CP_T_X 0x00008000
376 #define CP_T_Y 0x00400000
377
378 #define CONDS_BIT 0x00100000
379 #define LOAD_BIT 0x00100000
380
381 #define DOUBLE_LOAD_FLAG 0x00000001
382
383 struct asm_cond
384 {
385 const char * template;
386 unsigned long value;
387 };
388
389 #define COND_ALWAYS 0xE
390
391 struct asm_psr
392 {
393 const char *template;
394 unsigned long field;
395 };
396
397 struct asm_barrier_opt
398 {
399 const char *template;
400 unsigned long value;
401 };
402
403 /* The bit that distinguishes CPSR and SPSR. */
404 #define SPSR_BIT (1 << 22)
405
406 /* The individual PSR flag bits. */
407 #define PSR_c (1 << 16)
408 #define PSR_x (1 << 17)
409 #define PSR_s (1 << 18)
410 #define PSR_f (1 << 19)
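/* For example, the MSR operand "cpsr_fc" is encoded as PSR_f | PSR_c, and
   "spsr_fc" additionally includes SPSR_BIT (see the PSR name table further
   down in this file).  */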
411
412 struct reloc_entry
413 {
414 char *name;
415 bfd_reloc_code_real_type reloc;
416 };
417
418 enum vfp_reg_pos
419 {
420 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
421 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
422 };
423
424 enum vfp_ldstm_type
425 {
426 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
427 };
428
429 /* Bits for DEFINED field in neon_typed_alias. */
430 #define NTA_HASTYPE 1
431 #define NTA_HASINDEX 2
432
433 struct neon_typed_alias
434 {
435 unsigned char defined;
436 unsigned char index;
437 struct neon_type_el eltype;
438 };
439
440 /* ARM register categories. This includes coprocessor numbers and various
441 architecture extensions' registers. */
442 enum arm_reg_type
443 {
444 REG_TYPE_RN,
445 REG_TYPE_CP,
446 REG_TYPE_CN,
447 REG_TYPE_FN,
448 REG_TYPE_VFS,
449 REG_TYPE_VFD,
450 REG_TYPE_NQ,
451 REG_TYPE_VFSD,
452 REG_TYPE_NDQ,
453 REG_TYPE_NSDQ,
454 REG_TYPE_VFC,
455 REG_TYPE_MVF,
456 REG_TYPE_MVD,
457 REG_TYPE_MVFX,
458 REG_TYPE_MVDX,
459 REG_TYPE_MVAX,
460 REG_TYPE_DSPSC,
461 REG_TYPE_MMXWR,
462 REG_TYPE_MMXWC,
463 REG_TYPE_MMXWCG,
464 REG_TYPE_XSCALE,
465 };
466
467 /* Structure for a hash table entry for a register.
468 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
469 information which states whether a vector type or index is specified (for a
470 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
471 struct reg_entry
472 {
473 const char *name;
474 unsigned char number;
475 unsigned char type;
476 unsigned char builtin;
477 struct neon_typed_alias *neon;
478 };
479
480 /* Diagnostics used when we don't get a register of the expected type. */
481 const char *const reg_expected_msgs[] =
482 {
483 N_("ARM register expected"),
484 N_("bad or missing co-processor number"),
485 N_("co-processor register expected"),
486 N_("FPA register expected"),
487 N_("VFP single precision register expected"),
488 N_("VFP/Neon double precision register expected"),
489 N_("Neon quad precision register expected"),
490 N_("VFP single or double precision register expected"),
491 N_("Neon double or quad precision register expected"),
492 N_("VFP single, double or Neon quad precision register expected"),
493 N_("VFP system register expected"),
494 N_("Maverick MVF register expected"),
495 N_("Maverick MVD register expected"),
496 N_("Maverick MVFX register expected"),
497 N_("Maverick MVDX register expected"),
498 N_("Maverick MVAX register expected"),
499 N_("Maverick DSPSC register expected"),
500 N_("iWMMXt data register expected"),
501 N_("iWMMXt control register expected"),
502 N_("iWMMXt scalar register expected"),
503 N_("XScale accumulator register expected"),
504 };
505
506 /* Some well known registers that we refer to directly elsewhere. */
507 #define REG_SP 13
508 #define REG_LR 14
509 #define REG_PC 15
510
511 /* ARM instructions take 4 bytes in the object file, Thumb instructions
512 take 2: */
513 #define INSN_SIZE 4
514
515 struct asm_opcode
516 {
517 /* Basic string to match. */
518 const char *template;
519
520 /* Parameters to instruction. */
521 unsigned char operands[8];
522
523 /* Conditional tag - see opcode_lookup. */
524 unsigned int tag : 4;
525
526 /* Basic instruction code. */
527 unsigned int avalue : 28;
528
529 /* Thumb-format instruction code. */
530 unsigned int tvalue;
531
532 /* Which architecture variant provides this instruction. */
533 const arm_feature_set *avariant;
534 const arm_feature_set *tvariant;
535
536 /* Function to call to encode instruction in ARM format. */
537 void (* aencode) (void);
538
539 /* Function to call to encode instruction in Thumb format. */
540 void (* tencode) (void);
541 };
542
543 /* Defines for various bits that we will want to toggle. */
544 #define INST_IMMEDIATE 0x02000000
545 #define OFFSET_REG 0x02000000
546 #define HWOFFSET_IMM 0x00400000
547 #define SHIFT_BY_REG 0x00000010
548 #define PRE_INDEX 0x01000000
549 #define INDEX_UP 0x00800000
550 #define WRITE_BACK 0x00200000
551 #define LDM_TYPE_2_OR_3 0x00400000
552 #define CPSI_MMOD 0x00020000
553
554 #define LITERAL_MASK 0xf000f000
555 #define OPCODE_MASK 0xfe1fffff
556 #define V4_STR_BIT 0x00000020
557
558 #define DATA_OP_SHIFT 21
559
560 #define T2_OPCODE_MASK 0xfe1fffff
561 #define T2_DATA_OP_SHIFT 21
562
563 /* Codes to distinguish the arithmetic instructions. */
564 #define OPCODE_AND 0
565 #define OPCODE_EOR 1
566 #define OPCODE_SUB 2
567 #define OPCODE_RSB 3
568 #define OPCODE_ADD 4
569 #define OPCODE_ADC 5
570 #define OPCODE_SBC 6
571 #define OPCODE_RSC 7
572 #define OPCODE_TST 8
573 #define OPCODE_TEQ 9
574 #define OPCODE_CMP 10
575 #define OPCODE_CMN 11
576 #define OPCODE_ORR 12
577 #define OPCODE_MOV 13
578 #define OPCODE_BIC 14
579 #define OPCODE_MVN 15
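/* Each of the codes above occupies bits [24:21] of an ARM data-processing
   instruction, i.e. it is inserted shifted left by DATA_OP_SHIFT; for
   example, OPCODE_ADD << DATA_OP_SHIFT == 0x00800000.  */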
580
581 #define T2_OPCODE_AND 0
582 #define T2_OPCODE_BIC 1
583 #define T2_OPCODE_ORR 2
584 #define T2_OPCODE_ORN 3
585 #define T2_OPCODE_EOR 4
586 #define T2_OPCODE_ADD 8
587 #define T2_OPCODE_ADC 10
588 #define T2_OPCODE_SBC 11
589 #define T2_OPCODE_SUB 13
590 #define T2_OPCODE_RSB 14
591
592 #define T_OPCODE_MUL 0x4340
593 #define T_OPCODE_TST 0x4200
594 #define T_OPCODE_CMN 0x42c0
595 #define T_OPCODE_NEG 0x4240
596 #define T_OPCODE_MVN 0x43c0
597
598 #define T_OPCODE_ADD_R3 0x1800
599 #define T_OPCODE_SUB_R3 0x1a00
600 #define T_OPCODE_ADD_HI 0x4400
601 #define T_OPCODE_ADD_ST 0xb000
602 #define T_OPCODE_SUB_ST 0xb080
603 #define T_OPCODE_ADD_SP 0xa800
604 #define T_OPCODE_ADD_PC 0xa000
605 #define T_OPCODE_ADD_I8 0x3000
606 #define T_OPCODE_SUB_I8 0x3800
607 #define T_OPCODE_ADD_I3 0x1c00
608 #define T_OPCODE_SUB_I3 0x1e00
609
610 #define T_OPCODE_ASR_R 0x4100
611 #define T_OPCODE_LSL_R 0x4080
612 #define T_OPCODE_LSR_R 0x40c0
613 #define T_OPCODE_ROR_R 0x41c0
614 #define T_OPCODE_ASR_I 0x1000
615 #define T_OPCODE_LSL_I 0x0000
616 #define T_OPCODE_LSR_I 0x0800
617
618 #define T_OPCODE_MOV_I8 0x2000
619 #define T_OPCODE_CMP_I8 0x2800
620 #define T_OPCODE_CMP_LR 0x4280
621 #define T_OPCODE_MOV_HR 0x4600
622 #define T_OPCODE_CMP_HR 0x4500
623
624 #define T_OPCODE_LDR_PC 0x4800
625 #define T_OPCODE_LDR_SP 0x9800
626 #define T_OPCODE_STR_SP 0x9000
627 #define T_OPCODE_LDR_IW 0x6800
628 #define T_OPCODE_STR_IW 0x6000
629 #define T_OPCODE_LDR_IH 0x8800
630 #define T_OPCODE_STR_IH 0x8000
631 #define T_OPCODE_LDR_IB 0x7800
632 #define T_OPCODE_STR_IB 0x7000
633 #define T_OPCODE_LDR_RW 0x5800
634 #define T_OPCODE_STR_RW 0x5000
635 #define T_OPCODE_LDR_RH 0x5a00
636 #define T_OPCODE_STR_RH 0x5200
637 #define T_OPCODE_LDR_RB 0x5c00
638 #define T_OPCODE_STR_RB 0x5400
639
640 #define T_OPCODE_PUSH 0xb400
641 #define T_OPCODE_POP 0xbc00
642
643 #define T_OPCODE_BRANCH 0xe000
644
645 #define THUMB_SIZE 2 /* Size of thumb instruction. */
646 #define THUMB_PP_PC_LR 0x0100
647 #define THUMB_LOAD_BIT 0x0800
648 #define THUMB2_LOAD_BIT 0x00100000
649
650 #define BAD_ARGS _("bad arguments to instruction")
651 #define BAD_PC _("r15 not allowed here")
652 #define BAD_COND _("instruction cannot be conditional")
653 #define BAD_OVERLAP _("registers may not be the same")
654 #define BAD_HIREG _("lo register required")
655 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
656 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
657 #define BAD_BRANCH _("branch must be last instruction in IT block")
658 #define BAD_NOT_IT _("instruction not allowed in IT block")
659 #define BAD_FPU _("selected FPU does not support instruction")
660
661 static struct hash_control *arm_ops_hsh;
662 static struct hash_control *arm_cond_hsh;
663 static struct hash_control *arm_shift_hsh;
664 static struct hash_control *arm_psr_hsh;
665 static struct hash_control *arm_v7m_psr_hsh;
666 static struct hash_control *arm_reg_hsh;
667 static struct hash_control *arm_reloc_hsh;
668 static struct hash_control *arm_barrier_opt_hsh;
669
670 /* Stuff needed to resolve the label ambiguity
671 As:
672 ...
673 label: <insn>
674 may differ from:
675 ...
676 label:
677 <insn>
678 */
679
680 symbolS * last_label_seen;
681 static int label_is_thumb_function_name = FALSE;
682 \f
683 /* Literal pool structure. Held on a per-section
684 and per-sub-section basis. */
685
686 #define MAX_LITERAL_POOL_SIZE 1024
687 typedef struct literal_pool
688 {
689 expressionS literals [MAX_LITERAL_POOL_SIZE];
690 unsigned int next_free_entry;
691 unsigned int id;
692 symbolS * symbol;
693 segT section;
694 subsegT sub_section;
695 struct literal_pool * next;
696 } literal_pool;
697
698 /* Pointer to a linked list of literal pools. */
699 literal_pool * list_of_pools = NULL;
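/* Entries are added to the current pool by the "ldr Rd, =expression"
   pseudo-instruction and emitted when a .ltorg or .pool directive is
   seen, both handled later in this file.  */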
700
701 /* State variables for IT block handling. */
702 static int current_it_mask = 0;
703 static int current_cc;
704
705 \f
706 /* Pure syntax. */
707
708 /* This array holds the chars that always start a comment. If the
709 pre-processor is disabled, these aren't very useful. */
710 const char comment_chars[] = "@";
711
712 /* This array holds the chars that only start a comment at the beginning of
713 a line. If the line seems to have the form '# 123 filename'
714 .line and .file directives will appear in the pre-processed output. */
715 /* Note that input_file.c hand checks for '#' at the beginning of the
716 first line of the input file. This is because the compiler outputs
717 #NO_APP at the beginning of its output. */
718 /* Also note that comments like this one will always work. */
719 const char line_comment_chars[] = "#";
720
721 const char line_separator_chars[] = ";";
722
723 /* Chars that can be used to separate mant
724 from exp in floating point numbers. */
725 const char EXP_CHARS[] = "eE";
726
727 /* Chars that mean this number is a floating point constant. */
728 /* As in 0f12.456 */
729 /* or 0d1.2345e12 */
730
731 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
732
733 /* Prefix characters that indicate the start of an immediate
734 value. */
735 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
736
737 /* Separator character handling. */
738
739 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
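/* A single test is sufficient here because the input scrubber has already
   collapsed runs of whitespace to single spaces (see the comment in
   create_register_alias below).  */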
740
741 static inline int
742 skip_past_char (char ** str, char c)
743 {
744 if (**str == c)
745 {
746 (*str)++;
747 return SUCCESS;
748 }
749 else
750 return FAIL;
751 }
752 #define skip_past_comma(str) skip_past_char (str, ',')
753
754 /* Arithmetic expressions (possibly involving symbols). */
755
756 /* Return TRUE if anything in the expression is a bignum. */
757
758 static int
759 walk_no_bignums (symbolS * sp)
760 {
761 if (symbol_get_value_expression (sp)->X_op == O_big)
762 return 1;
763
764 if (symbol_get_value_expression (sp)->X_add_symbol)
765 {
766 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
767 || (symbol_get_value_expression (sp)->X_op_symbol
768 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
769 }
770
771 return 0;
772 }
773
774 static int in_my_get_expression = 0;
775
776 /* Third argument to my_get_expression. */
777 #define GE_NO_PREFIX 0
778 #define GE_IMM_PREFIX 1
779 #define GE_OPT_PREFIX 2
780 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
781 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
782 #define GE_OPT_PREFIX_BIG 3
783
784 static int
785 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
786 {
787 char * save_in;
788 segT seg;
789
790 /* In unified syntax, all prefixes are optional. */
791 if (unified_syntax)
792 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
793 : GE_OPT_PREFIX;
794
795 switch (prefix_mode)
796 {
797 case GE_NO_PREFIX: break;
798 case GE_IMM_PREFIX:
799 if (!is_immediate_prefix (**str))
800 {
801 inst.error = _("immediate expression requires a # prefix");
802 return FAIL;
803 }
804 (*str)++;
805 break;
806 case GE_OPT_PREFIX:
807 case GE_OPT_PREFIX_BIG:
808 if (is_immediate_prefix (**str))
809 (*str)++;
810 break;
811 default: abort ();
812 }
813
814 memset (ep, 0, sizeof (expressionS));
815
816 save_in = input_line_pointer;
817 input_line_pointer = *str;
818 in_my_get_expression = 1;
819 seg = expression (ep);
820 in_my_get_expression = 0;
821
822 if (ep->X_op == O_illegal)
823 {
824 /* We found a bad expression in md_operand(). */
825 *str = input_line_pointer;
826 input_line_pointer = save_in;
827 if (inst.error == NULL)
828 inst.error = _("bad expression");
829 return 1;
830 }
831
832 #ifdef OBJ_AOUT
833 if (seg != absolute_section
834 && seg != text_section
835 && seg != data_section
836 && seg != bss_section
837 && seg != undefined_section)
838 {
839 inst.error = _("bad segment");
840 *str = input_line_pointer;
841 input_line_pointer = save_in;
842 return 1;
843 }
844 #endif
845
846 /* Get rid of any bignums now, so that we don't generate an error for which
847 we can't establish a line number later on. Big numbers are never valid
848 in instructions, which is where this routine is always called. */
849 if (prefix_mode != GE_OPT_PREFIX_BIG
850 && (ep->X_op == O_big
851 || (ep->X_add_symbol
852 && (walk_no_bignums (ep->X_add_symbol)
853 || (ep->X_op_symbol
854 && walk_no_bignums (ep->X_op_symbol))))))
855 {
856 inst.error = _("invalid constant");
857 *str = input_line_pointer;
858 input_line_pointer = save_in;
859 return 1;
860 }
861
862 *str = input_line_pointer;
863 input_line_pointer = save_in;
864 return 0;
865 }
866
867 /* Turn a string in input_line_pointer into a floating point constant
868 of type TYPE, and store the appropriate bytes in *LITP. The number
869 of LITTLENUMS emitted is stored in *SIZEP. An error message is
870 returned, or NULL on OK.
871
872 Note that fp constants aren't represented in the normal way on the ARM.
873 In big endian mode, things are as expected. However, in little endian
874 mode fp constants are big-endian word-wise, and little-endian byte-wise
875 within the words. For example, (double) 1.1 in big endian mode is
876 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
877 the byte sequence 99 99 f1 3f 9a 99 99 99.
878
879 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
880
881 char *
882 md_atof (int type, char * litP, int * sizeP)
883 {
884 int prec;
885 LITTLENUM_TYPE words[MAX_LITTLENUMS];
886 char *t;
887 int i;
888
889 switch (type)
890 {
891 case 'f':
892 case 'F':
893 case 's':
894 case 'S':
895 prec = 2;
896 break;
897
898 case 'd':
899 case 'D':
900 case 'r':
901 case 'R':
902 prec = 4;
903 break;
904
905 case 'x':
906 case 'X':
907 prec = 6;
908 break;
909
910 case 'p':
911 case 'P':
912 prec = 6;
913 break;
914
915 default:
916 *sizeP = 0;
917 return _("bad call to MD_ATOF()");
918 }
919
920 t = atof_ieee (input_line_pointer, type, words);
921 if (t)
922 input_line_pointer = t;
923 *sizeP = prec * 2;
924
925 if (target_big_endian)
926 {
927 for (i = 0; i < prec; i++)
928 {
929 md_number_to_chars (litP, (valueT) words[i], 2);
930 litP += 2;
931 }
932 }
933 else
934 {
935 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
936 for (i = prec - 1; i >= 0; i--)
937 {
938 md_number_to_chars (litP, (valueT) words[i], 2);
939 litP += 2;
940 }
941 else
942 /* For a 4 byte float the order of elements in `words' is 1 0.
943 For an 8 byte float the order is 1 0 3 2. */
944 for (i = 0; i < prec; i += 2)
945 {
946 md_number_to_chars (litP, (valueT) words[i + 1], 2);
947 md_number_to_chars (litP + 2, (valueT) words[i], 2);
948 litP += 4;
949 }
950 }
951
952 return 0;
953 }
954
955 /* We handle all bad expressions here, so that we can report the faulty
956 instruction in the error message. */
957 void
958 md_operand (expressionS * expr)
959 {
960 if (in_my_get_expression)
961 expr->X_op = O_illegal;
962 }
963
964 /* Immediate values. */
965
966 /* Generic immediate-value read function for use in directives.
967 Accepts anything that 'expression' can fold to a constant.
968 *val receives the number. */
969 #ifdef OBJ_ELF
970 static int
971 immediate_for_directive (int *val)
972 {
973 expressionS exp;
974 exp.X_op = O_illegal;
975
976 if (is_immediate_prefix (*input_line_pointer))
977 {
978 input_line_pointer++;
979 expression (&exp);
980 }
981
982 if (exp.X_op != O_constant)
983 {
984 as_bad (_("expected #constant"));
985 ignore_rest_of_line ();
986 return FAIL;
987 }
988 *val = exp.X_add_number;
989 return SUCCESS;
990 }
991 #endif
992
993 /* Register parsing. */
994
995 /* Generic register parser. CCP points to what should be the
996 beginning of a register name. If it is indeed a valid register
997 name, advance CCP over it and return the reg_entry structure;
998 otherwise return NULL. Does not issue diagnostics. */
999
1000 static struct reg_entry *
1001 arm_reg_parse_multi (char **ccp)
1002 {
1003 char *start = *ccp;
1004 char *p;
1005 struct reg_entry *reg;
1006
1007 #ifdef REGISTER_PREFIX
1008 if (*start != REGISTER_PREFIX)
1009 return NULL;
1010 start++;
1011 #endif
1012 #ifdef OPTIONAL_REGISTER_PREFIX
1013 if (*start == OPTIONAL_REGISTER_PREFIX)
1014 start++;
1015 #endif
1016
1017 p = start;
1018 if (!ISALPHA (*p) || !is_name_beginner (*p))
1019 return NULL;
1020
1021 do
1022 p++;
1023 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1024
1025 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1026
1027 if (!reg)
1028 return NULL;
1029
1030 *ccp = p;
1031 return reg;
1032 }
1033
1034 static int
1035 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1036 enum arm_reg_type type)
1037 {
1038 /* Alternative syntaxes are accepted for a few register classes. */
1039 switch (type)
1040 {
1041 case REG_TYPE_MVF:
1042 case REG_TYPE_MVD:
1043 case REG_TYPE_MVFX:
1044 case REG_TYPE_MVDX:
1045 /* Generic coprocessor register names are allowed for these. */
1046 if (reg && reg->type == REG_TYPE_CN)
1047 return reg->number;
1048 break;
1049
1050 case REG_TYPE_CP:
1051 /* For backward compatibility, a bare number is valid here. */
1052 {
1053 unsigned long processor = strtoul (start, ccp, 10);
1054 if (*ccp != start && processor <= 15)
1055 return processor;
1056 }
1057
1058 case REG_TYPE_MMXWC:
1059 /* WC includes WCG. ??? I'm not sure this is true for all
1060 instructions that take WC registers. */
1061 if (reg && reg->type == REG_TYPE_MMXWCG)
1062 return reg->number;
1063 break;
1064
1065 default:
1066 break;
1067 }
1068
1069 return FAIL;
1070 }
1071
1072 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1073 return value is the register number or FAIL. */
1074
1075 static int
1076 arm_reg_parse (char **ccp, enum arm_reg_type type)
1077 {
1078 char *start = *ccp;
1079 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1080 int ret;
1081
1082 /* Do not allow a scalar (reg+index) to parse as a register. */
1083 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1084 return FAIL;
1085
1086 if (reg && reg->type == type)
1087 return reg->number;
1088
1089 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1090 return ret;
1091
1092 *ccp = start;
1093 return FAIL;
1094 }
1095
1096 /* Parse a Neon type specifier. *STR should point at the leading '.'
1097 character. Does no verification at this stage that the type fits the opcode
1098 properly. E.g.,
1099
1100 .i32.i32.s16
1101 .s32.f32
1102 .u16
1103
1104 Can all be legally parsed by this function.
1105
1106 Fills in neon_type struct pointer with parsed information, and updates STR
1107 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1108 type, FAIL if not. */
1109
1110 static int
1111 parse_neon_type (struct neon_type *type, char **str)
1112 {
1113 char *ptr = *str;
1114
1115 if (type)
1116 type->elems = 0;
1117
1118 while (type->elems < NEON_MAX_TYPE_ELS)
1119 {
1120 enum neon_el_type thistype = NT_untyped;
1121 unsigned thissize = -1u;
1122
1123 if (*ptr != '.')
1124 break;
1125
1126 ptr++;
1127
1128 /* Just a size without an explicit type. */
1129 if (ISDIGIT (*ptr))
1130 goto parsesize;
1131
1132 switch (TOLOWER (*ptr))
1133 {
1134 case 'i': thistype = NT_integer; break;
1135 case 'f': thistype = NT_float; break;
1136 case 'p': thistype = NT_poly; break;
1137 case 's': thistype = NT_signed; break;
1138 case 'u': thistype = NT_unsigned; break;
1139 case 'd':
1140 thistype = NT_float;
1141 thissize = 64;
1142 ptr++;
1143 goto done;
1144 default:
1145 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1146 return FAIL;
1147 }
1148
1149 ptr++;
1150
1151 /* .f is an abbreviation for .f32. */
1152 if (thistype == NT_float && !ISDIGIT (*ptr))
1153 thissize = 32;
1154 else
1155 {
1156 parsesize:
1157 thissize = strtoul (ptr, &ptr, 10);
1158
1159 if (thissize != 8 && thissize != 16 && thissize != 32
1160 && thissize != 64)
1161 {
1162 as_bad (_("bad size %d in type specifier"), thissize);
1163 return FAIL;
1164 }
1165 }
1166
1167 done:
1168 if (type)
1169 {
1170 type->el[type->elems].type = thistype;
1171 type->el[type->elems].size = thissize;
1172 type->elems++;
1173 }
1174 }
1175
1176 /* Empty/missing type is not a successful parse. */
1177 if (type->elems == 0)
1178 return FAIL;
1179
1180 *str = ptr;
1181
1182 return SUCCESS;
1183 }
1184
1185 /* Errors may be set multiple times during parsing or bit encoding
1186 (particularly in the Neon bits), but usually the earliest error which is set
1187 will be the most meaningful. Avoid overwriting it with later (cascading)
1188 errors by calling this function. */
1189
1190 static void
1191 first_error (const char *err)
1192 {
1193 if (!inst.error)
1194 inst.error = err;
1195 }
1196
1197 /* Parse a single type, e.g. ".s32", leading period included. */
1198 static int
1199 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1200 {
1201 char *str = *ccp;
1202 struct neon_type optype;
1203
1204 if (*str == '.')
1205 {
1206 if (parse_neon_type (&optype, &str) == SUCCESS)
1207 {
1208 if (optype.elems == 1)
1209 *vectype = optype.el[0];
1210 else
1211 {
1212 first_error (_("only one type should be specified for operand"));
1213 return FAIL;
1214 }
1215 }
1216 else
1217 {
1218 first_error (_("vector type expected"));
1219 return FAIL;
1220 }
1221 }
1222 else
1223 return FAIL;
1224
1225 *ccp = str;
1226
1227 return SUCCESS;
1228 }
1229
1230 /* Special meanings for indices (which normally have a range of 0-7); these
1231 values still fit into a 4-bit integer. */
1232
1233 #define NEON_ALL_LANES 15
1234 #define NEON_INTERLEAVE_LANES 14
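/* For example, a scalar written "d0[1]" records index 1, the all-lanes form
   "d0[]" is recorded as NEON_ALL_LANES, and a plain register inside an
   element/structure list gets NEON_INTERLEAVE_LANES.  */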
1235
1236 /* Parse either a register or a scalar, with an optional type. Return the
1237 register number, and optionally fill in the actual type of the register
1238 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1239 type/index information in *TYPEINFO. */
1240
1241 static int
1242 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1243 enum arm_reg_type *rtype,
1244 struct neon_typed_alias *typeinfo)
1245 {
1246 char *str = *ccp;
1247 struct reg_entry *reg = arm_reg_parse_multi (&str);
1248 struct neon_typed_alias atype;
1249 struct neon_type_el parsetype;
1250
1251 atype.defined = 0;
1252 atype.index = -1;
1253 atype.eltype.type = NT_invtype;
1254 atype.eltype.size = -1;
1255
1256 /* Try alternate syntax for some types of register. Note these are mutually
1257 exclusive with the Neon syntax extensions. */
1258 if (reg == NULL)
1259 {
1260 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1261 if (altreg != FAIL)
1262 *ccp = str;
1263 if (typeinfo)
1264 *typeinfo = atype;
1265 return altreg;
1266 }
1267
1268 /* Undo polymorphism when a set of register types may be accepted. */
1269 if ((type == REG_TYPE_NDQ
1270 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1271 || (type == REG_TYPE_VFSD
1272 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1273 || (type == REG_TYPE_NSDQ
1274 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1275 || reg->type == REG_TYPE_NQ))
1276 || (type == REG_TYPE_MMXWC
1277 && (reg->type == REG_TYPE_MMXWCG)))
1278 type = reg->type;
1279
1280 if (type != reg->type)
1281 return FAIL;
1282
1283 if (reg->neon)
1284 atype = *reg->neon;
1285
1286 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1287 {
1288 if ((atype.defined & NTA_HASTYPE) != 0)
1289 {
1290 first_error (_("can't redefine type for operand"));
1291 return FAIL;
1292 }
1293 atype.defined |= NTA_HASTYPE;
1294 atype.eltype = parsetype;
1295 }
1296
1297 if (skip_past_char (&str, '[') == SUCCESS)
1298 {
1299 if (type != REG_TYPE_VFD)
1300 {
1301 first_error (_("only D registers may be indexed"));
1302 return FAIL;
1303 }
1304
1305 if ((atype.defined & NTA_HASINDEX) != 0)
1306 {
1307 first_error (_("can't change index for operand"));
1308 return FAIL;
1309 }
1310
1311 atype.defined |= NTA_HASINDEX;
1312
1313 if (skip_past_char (&str, ']') == SUCCESS)
1314 atype.index = NEON_ALL_LANES;
1315 else
1316 {
1317 expressionS exp;
1318
1319 my_get_expression (&exp, &str, GE_NO_PREFIX);
1320
1321 if (exp.X_op != O_constant)
1322 {
1323 first_error (_("constant expression required"));
1324 return FAIL;
1325 }
1326
1327 if (skip_past_char (&str, ']') == FAIL)
1328 return FAIL;
1329
1330 atype.index = exp.X_add_number;
1331 }
1332 }
1333
1334 if (typeinfo)
1335 *typeinfo = atype;
1336
1337 if (rtype)
1338 *rtype = type;
1339
1340 *ccp = str;
1341
1342 return reg->number;
1343 }
1344
1345 /* Like arm_reg_parse, but allow the following extra features:
1346 - If RTYPE is non-zero, return the (possibly restricted) type of the
1347 register (e.g. Neon double or quad reg when either has been requested).
1348 - If this is a Neon vector type with additional type information, fill
1349 in the struct pointed to by VECTYPE (if non-NULL).
1350 This function will fault on encountering a scalar.
1351 */
1352
1353 static int
1354 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1355 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1356 {
1357 struct neon_typed_alias atype;
1358 char *str = *ccp;
1359 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1360
1361 if (reg == FAIL)
1362 return FAIL;
1363
1364 /* Do not allow a scalar (reg+index) to parse as a register. */
1365 if ((atype.defined & NTA_HASINDEX) != 0)
1366 {
1367 first_error (_("register operand expected, but got scalar"));
1368 return FAIL;
1369 }
1370
1371 if (vectype)
1372 *vectype = atype.eltype;
1373
1374 *ccp = str;
1375
1376 return reg;
1377 }
1378
1379 #define NEON_SCALAR_REG(X) ((X) >> 4)
1380 #define NEON_SCALAR_INDEX(X) ((X) & 15)
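/* For example, parse_scalar below encodes "d5[1]" as 5 * 16 + 1, from which
   NEON_SCALAR_REG extracts 5 and NEON_SCALAR_INDEX extracts 1.  */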
1381
1382 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1383 have enough information to be able to do a good job bounds-checking. So, we
1384 just do easy checks here, and do further checks later. */
1385
1386 static int
1387 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1388 {
1389 int reg;
1390 char *str = *ccp;
1391 struct neon_typed_alias atype;
1392
1393 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1394
1395 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1396 return FAIL;
1397
1398 if (atype.index == NEON_ALL_LANES)
1399 {
1400 first_error (_("scalar must have an index"));
1401 return FAIL;
1402 }
1403 else if (atype.index >= 64 / elsize)
1404 {
1405 first_error (_("scalar index out of range"));
1406 return FAIL;
1407 }
1408
1409 if (type)
1410 *type = atype.eltype;
1411
1412 *ccp = str;
1413
1414 return reg * 16 + atype.index;
1415 }
1416
1417 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
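/* For example, "{r0-r3, lr}" yields the bitmask 0x400f: bits 0-3 for r0-r3
   plus bit 14 for lr.  */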
1418 static long
1419 parse_reg_list (char ** strp)
1420 {
1421 char * str = * strp;
1422 long range = 0;
1423 int another_range;
1424
1425 /* We come back here if we get ranges concatenated by '+' or '|'. */
1426 do
1427 {
1428 another_range = 0;
1429
1430 if (*str == '{')
1431 {
1432 int in_range = 0;
1433 int cur_reg = -1;
1434
1435 str++;
1436 do
1437 {
1438 int reg;
1439
1440 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1441 {
1442 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1443 return FAIL;
1444 }
1445
1446 if (in_range)
1447 {
1448 int i;
1449
1450 if (reg <= cur_reg)
1451 {
1452 first_error (_("bad range in register list"));
1453 return FAIL;
1454 }
1455
1456 for (i = cur_reg + 1; i < reg; i++)
1457 {
1458 if (range & (1 << i))
1459 as_tsktsk
1460 (_("Warning: duplicated register (r%d) in register list"),
1461 i);
1462 else
1463 range |= 1 << i;
1464 }
1465 in_range = 0;
1466 }
1467
1468 if (range & (1 << reg))
1469 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1470 reg);
1471 else if (reg <= cur_reg)
1472 as_tsktsk (_("Warning: register range not in ascending order"));
1473
1474 range |= 1 << reg;
1475 cur_reg = reg;
1476 }
1477 while (skip_past_comma (&str) != FAIL
1478 || (in_range = 1, *str++ == '-'));
1479 str--;
1480
1481 if (*str++ != '}')
1482 {
1483 first_error (_("missing `}'"));
1484 return FAIL;
1485 }
1486 }
1487 else
1488 {
1489 expressionS expr;
1490
1491 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1492 return FAIL;
1493
1494 if (expr.X_op == O_constant)
1495 {
1496 if (expr.X_add_number
1497 != (expr.X_add_number & 0x0000ffff))
1498 {
1499 inst.error = _("invalid register mask");
1500 return FAIL;
1501 }
1502
1503 if ((range & expr.X_add_number) != 0)
1504 {
1505 int regno = 0;
1506 /* Report the lowest duplicated register number. */
1507 while (((range & expr.X_add_number) & (1 << regno)) == 0)
1508 regno++;
1509 as_tsktsk
1510 (_("Warning: duplicated register (r%d) in register list"),
1511 regno);
1512 }
1513
1514 range |= expr.X_add_number;
1515 }
1516 else
1517 {
1518 if (inst.reloc.type != 0)
1519 {
1520 inst.error = _("expression too complex");
1521 return FAIL;
1522 }
1523
1524 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1525 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1526 inst.reloc.pc_rel = 0;
1527 }
1528 }
1529
1530 if (*str == '|' || *str == '+')
1531 {
1532 str++;
1533 another_range = 1;
1534 }
1535 }
1536 while (another_range);
1537
1538 *strp = str;
1539 return range;
1540 }
1541
1542 /* Types of registers in a list. */
1543
1544 enum reg_list_els
1545 {
1546 REGLIST_VFP_S,
1547 REGLIST_VFP_D,
1548 REGLIST_NEON_D
1549 };
1550
1551 /* Parse a VFP register list. If the string is invalid return FAIL.
1552 Otherwise return the number of registers, and set PBASE to the first
1553 register. Parses registers of type ETYPE.
1554 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1555 - Q registers can be used to specify pairs of D registers
1556 - { } can be omitted from around a singleton register list
1557 FIXME: This is not implemented, as it would require backtracking in
1558 some cases, e.g.:
1559 vtbl.8 d3,d4,d5
1560 This could be done (the meaning isn't really ambiguous), but doesn't
1561 fit in well with the current parsing framework.
1562 - 32 D registers may be used (also true for VFPv3).
1563 FIXME: Types are ignored in these register lists, which is probably a
1564 bug. */
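/* For example, "{d4-d7}" returns a count of 4 with *PBASE set to 4; with
   REGLIST_NEON_D, "{q0, q1}" is equivalent to "{d0-d3}", since a Q register
   parses as twice its number (see below).  */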
1565
1566 static int
1567 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1568 {
1569 char *str = *ccp;
1570 int base_reg;
1571 int new_base;
1572 enum arm_reg_type regtype = 0;
1573 int max_regs = 0;
1574 int count = 0;
1575 int warned = 0;
1576 unsigned long mask = 0;
1577 int i;
1578
1579 if (*str != '{')
1580 {
1581 inst.error = _("expecting {");
1582 return FAIL;
1583 }
1584
1585 str++;
1586
1587 switch (etype)
1588 {
1589 case REGLIST_VFP_S:
1590 regtype = REG_TYPE_VFS;
1591 max_regs = 32;
1592 break;
1593
1594 case REGLIST_VFP_D:
1595 regtype = REG_TYPE_VFD;
1596 break;
1597
1598 case REGLIST_NEON_D:
1599 regtype = REG_TYPE_NDQ;
1600 break;
1601 }
1602
1603 if (etype != REGLIST_VFP_S)
1604 {
1605 /* VFPv3 allows 32 D registers. */
1606 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1607 {
1608 max_regs = 32;
1609 if (thumb_mode)
1610 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1611 fpu_vfp_ext_v3);
1612 else
1613 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1614 fpu_vfp_ext_v3);
1615 }
1616 else
1617 max_regs = 16;
1618 }
1619
1620 base_reg = max_regs;
1621
1622 do
1623 {
1624 int setmask = 1, addregs = 1;
1625
1626 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1627
1628 if (new_base == FAIL)
1629 {
1630 first_error (_(reg_expected_msgs[regtype]));
1631 return FAIL;
1632 }
1633
1634 if (new_base >= max_regs)
1635 {
1636 first_error (_("register out of range in list"));
1637 return FAIL;
1638 }
1639
1640 /* Note: a value of 2 * n is returned for the register Q<n>. */
1641 if (regtype == REG_TYPE_NQ)
1642 {
1643 setmask = 3;
1644 addregs = 2;
1645 }
1646
1647 if (new_base < base_reg)
1648 base_reg = new_base;
1649
1650 if (mask & (setmask << new_base))
1651 {
1652 first_error (_("invalid register list"));
1653 return FAIL;
1654 }
1655
1656 if ((mask >> new_base) != 0 && ! warned)
1657 {
1658 as_tsktsk (_("register list not in ascending order"));
1659 warned = 1;
1660 }
1661
1662 mask |= setmask << new_base;
1663 count += addregs;
1664
1665 if (*str == '-') /* We have the start of a range expression */
1666 {
1667 int high_range;
1668
1669 str++;
1670
1671 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1672 == FAIL)
1673 {
1674 inst.error = gettext (reg_expected_msgs[regtype]);
1675 return FAIL;
1676 }
1677
1678 if (high_range >= max_regs)
1679 {
1680 first_error (_("register out of range in list"));
1681 return FAIL;
1682 }
1683
1684 if (regtype == REG_TYPE_NQ)
1685 high_range = high_range + 1;
1686
1687 if (high_range <= new_base)
1688 {
1689 inst.error = _("register range not in ascending order");
1690 return FAIL;
1691 }
1692
1693 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1694 {
1695 if (mask & (setmask << new_base))
1696 {
1697 inst.error = _("invalid register list");
1698 return FAIL;
1699 }
1700
1701 mask |= setmask << new_base;
1702 count += addregs;
1703 }
1704 }
1705 }
1706 while (skip_past_comma (&str) != FAIL);
1707
1708 str++;
1709
1710 /* Sanity check -- should have raised a parse error above. */
1711 if (count == 0 || count > max_regs)
1712 abort ();
1713
1714 *pbase = base_reg;
1715
1716 /* Final test -- the registers must be consecutive. */
1717 mask >>= base_reg;
1718 for (i = 0; i < count; i++)
1719 {
1720 if ((mask & (1u << i)) == 0)
1721 {
1722 inst.error = _("non-contiguous register range");
1723 return FAIL;
1724 }
1725 }
1726
1727 *ccp = str;
1728
1729 return count;
1730 }
1731
1732 /* True if two alias types are the same. */
1733
1734 static int
1735 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1736 {
1737 if (!a && !b)
1738 return 1;
1739
1740 if (!a || !b)
1741 return 0;
1742
1743 if (a->defined != b->defined)
1744 return 0;
1745
1746 if ((a->defined & NTA_HASTYPE) != 0
1747 && (a->eltype.type != b->eltype.type
1748 || a->eltype.size != b->eltype.size))
1749 return 0;
1750
1751 if ((a->defined & NTA_HASINDEX) != 0
1752 && (a->index != b->index))
1753 return 0;
1754
1755 return 1;
1756 }
1757
1758 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1759 The base register is put in *PBASE.
1760 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1761 the return value.
1762 The register stride (minus one) is put in bit 4 of the return value.
1763 Bits [6:5] encode the list length (minus one).
1764 The type of the list elements is put in *ELTYPE, if non-NULL. */
1765
1766 #define NEON_LANE(X) ((X) & 0xf)
1767 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1768 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
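/* For example, "{d0-d3}" with no lane index parses to
   NEON_INTERLEAVE_LANES | (0 << 4) | (3 << 5): lane "14", unit register
   stride, four registers.  */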
1769
1770 static int
1771 parse_neon_el_struct_list (char **str, unsigned *pbase,
1772 struct neon_type_el *eltype)
1773 {
1774 char *ptr = *str;
1775 int base_reg = -1;
1776 int reg_incr = -1;
1777 int count = 0;
1778 int lane = -1;
1779 int leading_brace = 0;
1780 enum arm_reg_type rtype = REG_TYPE_NDQ;
1781 int addregs = 1;
1782 const char *const incr_error = "register stride must be 1 or 2";
1783 const char *const type_error = "mismatched element/structure types in list";
1784 struct neon_typed_alias firsttype;
1785
1786 if (skip_past_char (&ptr, '{') == SUCCESS)
1787 leading_brace = 1;
1788
1789 do
1790 {
1791 struct neon_typed_alias atype;
1792 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1793
1794 if (getreg == FAIL)
1795 {
1796 first_error (_(reg_expected_msgs[rtype]));
1797 return FAIL;
1798 }
1799
1800 if (base_reg == -1)
1801 {
1802 base_reg = getreg;
1803 if (rtype == REG_TYPE_NQ)
1804 {
1805 reg_incr = 1;
1806 addregs = 2;
1807 }
1808 firsttype = atype;
1809 }
1810 else if (reg_incr == -1)
1811 {
1812 reg_incr = getreg - base_reg;
1813 if (reg_incr < 1 || reg_incr > 2)
1814 {
1815 first_error (_(incr_error));
1816 return FAIL;
1817 }
1818 }
1819 else if (getreg != base_reg + reg_incr * count)
1820 {
1821 first_error (_(incr_error));
1822 return FAIL;
1823 }
1824
1825 if (!neon_alias_types_same (&atype, &firsttype))
1826 {
1827 first_error (_(type_error));
1828 return FAIL;
1829 }
1830
1831 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1832 modes. */
1833 if (ptr[0] == '-')
1834 {
1835 struct neon_typed_alias htype;
1836 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1837 if (lane == -1)
1838 lane = NEON_INTERLEAVE_LANES;
1839 else if (lane != NEON_INTERLEAVE_LANES)
1840 {
1841 first_error (_(type_error));
1842 return FAIL;
1843 }
1844 if (reg_incr == -1)
1845 reg_incr = 1;
1846 else if (reg_incr != 1)
1847 {
1848 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1849 return FAIL;
1850 }
1851 ptr++;
1852 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1853 if (hireg == FAIL)
1854 {
1855 first_error (_(reg_expected_msgs[rtype]));
1856 return FAIL;
1857 }
1858 if (!neon_alias_types_same (&htype, &firsttype))
1859 {
1860 first_error (_(type_error));
1861 return FAIL;
1862 }
1863 count += hireg + dregs - getreg;
1864 continue;
1865 }
1866
1867 /* If we're using Q registers, we can't use [] or [n] syntax. */
1868 if (rtype == REG_TYPE_NQ)
1869 {
1870 count += 2;
1871 continue;
1872 }
1873
1874 if ((atype.defined & NTA_HASINDEX) != 0)
1875 {
1876 if (lane == -1)
1877 lane = atype.index;
1878 else if (lane != atype.index)
1879 {
1880 first_error (_(type_error));
1881 return FAIL;
1882 }
1883 }
1884 else if (lane == -1)
1885 lane = NEON_INTERLEAVE_LANES;
1886 else if (lane != NEON_INTERLEAVE_LANES)
1887 {
1888 first_error (_(type_error));
1889 return FAIL;
1890 }
1891 count++;
1892 }
1893 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1894
1895 /* No lane set by [x]. We must be interleaving structures. */
1896 if (lane == -1)
1897 lane = NEON_INTERLEAVE_LANES;
1898
1899 /* Sanity check. */
1900 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1901 || (count > 1 && reg_incr == -1))
1902 {
1903 first_error (_("error parsing element/structure list"));
1904 return FAIL;
1905 }
1906
1907 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1908 {
1909 first_error (_("expected }"));
1910 return FAIL;
1911 }
1912
1913 if (reg_incr == -1)
1914 reg_incr = 1;
1915
1916 if (eltype)
1917 *eltype = firsttype.eltype;
1918
1919 *pbase = base_reg;
1920 *str = ptr;
1921
1922 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1923 }
1924
1925 /* Parse an explicit relocation suffix on an expression. This is
1926 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1927 arm_reloc_hsh contains no entries, so this function can only
1928 succeed if there is no () after the word. Returns -1 on error,
1929 BFD_RELOC_UNUSED if there wasn't any suffix. */
1930 static int
1931 parse_reloc (char **str)
1932 {
1933 struct reloc_entry *r;
1934 char *p, *q;
1935
1936 if (**str != '(')
1937 return BFD_RELOC_UNUSED;
1938
1939 p = *str + 1;
1940 q = p;
1941
1942 while (*q && *q != ')' && *q != ',')
1943 q++;
1944 if (*q != ')')
1945 return -1;
1946
1947 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1948 return -1;
1949
1950 *str = q + 1;
1951 return r->reloc;
1952 }
1953
1954 /* Directives: register aliases. */
1955
1956 static struct reg_entry *
1957 insert_reg_alias (char *str, int number, int type)
1958 {
1959 struct reg_entry *new;
1960 const char *name;
1961
1962 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1963 {
1964 if (new->builtin)
1965 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1966
1967 /* Only warn about a redefinition if it's not defined as the
1968 same register. */
1969 else if (new->number != number || new->type != type)
1970 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1971
1972 return 0;
1973 }
1974
1975 name = xstrdup (str);
1976 new = xmalloc (sizeof (struct reg_entry));
1977
1978 new->name = name;
1979 new->number = number;
1980 new->type = type;
1981 new->builtin = FALSE;
1982 new->neon = NULL;
1983
1984 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1985 abort ();
1986
1987 return new;
1988 }
1989
1990 static void
1991 insert_neon_reg_alias (char *str, int number, int type,
1992 struct neon_typed_alias *atype)
1993 {
1994 struct reg_entry *reg = insert_reg_alias (str, number, type);
1995
1996 if (!reg)
1997 {
1998 first_error (_("attempt to redefine typed alias"));
1999 return;
2000 }
2001
2002 if (atype)
2003 {
2004 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2005 *reg->neon = *atype;
2006 }
2007 }
2008
2009 /* Look for the .req directive. This is of the form:
2010
2011 new_register_name .req existing_register_name
2012
2013 If we find one, or if it looks sufficiently like one that we want to
2014 handle any error here, return non-zero. Otherwise return zero. */
2015
2016 static int
2017 create_register_alias (char * newname, char *p)
2018 {
2019 struct reg_entry *old;
2020 char *oldname, *nbuf;
2021 size_t nlen;
2022
2023 /* The input scrubber ensures that whitespace after the mnemonic is
2024 collapsed to single spaces. */
2025 oldname = p;
2026 if (strncmp (oldname, " .req ", 6) != 0)
2027 return 0;
2028
2029 oldname += 6;
2030 if (*oldname == '\0')
2031 return 0;
2032
2033 old = hash_find (arm_reg_hsh, oldname);
2034 if (!old)
2035 {
2036 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2037 return 1;
2038 }
2039
2040 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2041 the desired alias name, and p points to its end. If not, then
2042 the desired alias name is in the global original_case_string. */
2043 #ifdef TC_CASE_SENSITIVE
2044 nlen = p - newname;
2045 #else
2046 newname = original_case_string;
2047 nlen = strlen (newname);
2048 #endif
2049
2050 nbuf = alloca (nlen + 1);
2051 memcpy (nbuf, newname, nlen);
2052 nbuf[nlen] = '\0';
2053
2054 /* Create aliases under the new name as stated; an all-lowercase
2055 version of the new name; and an all-uppercase version of the new
2056 name. */
2057 insert_reg_alias (nbuf, old->number, old->type);
2058
2059 for (p = nbuf; *p; p++)
2060 *p = TOUPPER (*p);
2061
2062 if (strncmp (nbuf, newname, nlen))
2063 insert_reg_alias (nbuf, old->number, old->type);
2064
2065 for (p = nbuf; *p; p++)
2066 *p = TOLOWER (*p);
2067
2068 if (strncmp (nbuf, newname, nlen))
2069 insert_reg_alias (nbuf, old->number, old->type);
2070
2071 return 1;
2072 }
2073
2074 /* Create a Neon typed/indexed register alias using directives, e.g.:
2075 X .dn d5.s32[1]
2076 Y .qn 6.s16
2077 Z .dn d7
2078 T .dn Z[0]
2079 These typed registers can be used instead of the types specified after the
2080 Neon mnemonic, so long as all operands given have types. Types can also be
2081 specified directly, e.g.:
2082 vadd d0.s32, d1.s32, d2.s32
2083 */
2084
2085 static int
2086 create_neon_reg_alias (char *newname, char *p)
2087 {
2088 enum arm_reg_type basetype;
2089 struct reg_entry *basereg;
2090 struct reg_entry mybasereg;
2091 struct neon_type ntype;
2092 struct neon_typed_alias typeinfo;
2093 char *namebuf, *nameend;
2094 int namelen;
2095
2096 typeinfo.defined = 0;
2097 typeinfo.eltype.type = NT_invtype;
2098 typeinfo.eltype.size = -1;
2099 typeinfo.index = -1;
2100
2101 nameend = p;
2102
2103 if (strncmp (p, " .dn ", 5) == 0)
2104 basetype = REG_TYPE_VFD;
2105 else if (strncmp (p, " .qn ", 5) == 0)
2106 basetype = REG_TYPE_NQ;
2107 else
2108 return 0;
2109
2110 p += 5;
2111
2112 if (*p == '\0')
2113 return 0;
2114
2115 basereg = arm_reg_parse_multi (&p);
2116
2117 if (basereg && basereg->type != basetype)
2118 {
2119 as_bad (_("bad type for register"));
2120 return 0;
2121 }
2122
2123 if (basereg == NULL)
2124 {
2125 expressionS exp;
2126 /* Try parsing as an integer. */
2127 my_get_expression (&exp, &p, GE_NO_PREFIX);
2128 if (exp.X_op != O_constant)
2129 {
2130 as_bad (_("expression must be constant"));
2131 return 0;
2132 }
2133 basereg = &mybasereg;
2134 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2135 : exp.X_add_number;
2136 basereg->neon = 0;
2137 }
2138
2139 if (basereg->neon)
2140 typeinfo = *basereg->neon;
2141
2142 if (parse_neon_type (&ntype, &p) == SUCCESS)
2143 {
2144 /* We got a type. */
2145 if (typeinfo.defined & NTA_HASTYPE)
2146 {
2147 as_bad (_("can't redefine the type of a register alias"));
2148 return 0;
2149 }
2150
2151 typeinfo.defined |= NTA_HASTYPE;
2152 if (ntype.elems != 1)
2153 {
2154 as_bad (_("you must specify a single type only"));
2155 return 0;
2156 }
2157 typeinfo.eltype = ntype.el[0];
2158 }
2159
2160 if (skip_past_char (&p, '[') == SUCCESS)
2161 {
2162 expressionS exp;
2163 /* We got a scalar index. */
2164
2165 if (typeinfo.defined & NTA_HASINDEX)
2166 {
2167 as_bad (_("can't redefine the index of a scalar alias"));
2168 return 0;
2169 }
2170
2171 my_get_expression (&exp, &p, GE_NO_PREFIX);
2172
2173 if (exp.X_op != O_constant)
2174 {
2175 as_bad (_("scalar index must be constant"));
2176 return 0;
2177 }
2178
2179 typeinfo.defined |= NTA_HASINDEX;
2180 typeinfo.index = exp.X_add_number;
2181
2182 if (skip_past_char (&p, ']') == FAIL)
2183 {
2184 as_bad (_("expecting ]"));
2185 return 0;
2186 }
2187 }
2188
2189 namelen = nameend - newname;
2190 namebuf = alloca (namelen + 1);
2191 strncpy (namebuf, newname, namelen);
2192 namebuf[namelen] = '\0';
2193
2194 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2195 typeinfo.defined != 0 ? &typeinfo : NULL);
2196
2197 /* Insert name in all uppercase. */
2198 for (p = namebuf; *p; p++)
2199 *p = TOUPPER (*p);
2200
2201 if (strncmp (namebuf, newname, namelen))
2202 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2203 typeinfo.defined != 0 ? &typeinfo : NULL);
2204
2205 /* Insert name in all lowercase. */
2206 for (p = namebuf; *p; p++)
2207 *p = TOLOWER (*p);
2208
2209 if (strncmp (namebuf, newname, namelen))
2210 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2211 typeinfo.defined != 0 ? &typeinfo : NULL);
2212
2213 return 1;
2214 }
2215
2216 /* Should never be called, as .req goes between the alias and the
2217 register name, not at the beginning of the line. */
2218 static void
2219 s_req (int a ATTRIBUTE_UNUSED)
2220 {
2221 as_bad (_("invalid syntax for .req directive"));
2222 }
2223
2224 static void
2225 s_dn (int a ATTRIBUTE_UNUSED)
2226 {
2227 as_bad (_("invalid syntax for .dn directive"));
2228 }
2229
2230 static void
2231 s_qn (int a ATTRIBUTE_UNUSED)
2232 {
2233 as_bad (_("invalid syntax for .qn directive"));
2234 }
2235
2236 /* The .unreq directive deletes an alias which was previously defined
2237 by .req. For example:
2238
2239 my_alias .req r11
2240 .unreq my_alias */
2241
2242 static void
2243 s_unreq (int a ATTRIBUTE_UNUSED)
2244 {
2245 char * name;
2246 char saved_char;
2247
2248 name = input_line_pointer;
2249
2250 while (*input_line_pointer != 0
2251 && *input_line_pointer != ' '
2252 && *input_line_pointer != '\n')
2253 ++input_line_pointer;
2254
2255 saved_char = *input_line_pointer;
2256 *input_line_pointer = 0;
2257
2258 if (!*name)
2259 as_bad (_("invalid syntax for .unreq directive"));
2260 else
2261 {
2262 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2263
2264 if (!reg)
2265 as_bad (_("unknown register alias '%s'"), name);
2266 else if (reg->builtin)
2267 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2268 name);
2269 else
2270 {
2271 hash_delete (arm_reg_hsh, name);
2272 free ((char *) reg->name);
2273 if (reg->neon)
2274 free (reg->neon);
2275 free (reg);
2276 }
2277 }
2278
2279 *input_line_pointer = saved_char;
2280 demand_empty_rest_of_line ();
2281 }
2282
2283 /* Directives: Instruction set selection. */
2284
2285 #ifdef OBJ_ELF
2286 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2287 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2288    Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2289    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
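/* For instance, a fragment such as (illustrative)

       .arm
   fn:	ldr	r0, =0x12345678
	.ltorg

   gets a "$a" mapping symbol at fn and a "$d" mapping symbol at the start
   of the literal pool; exact placement follows the mapping_state calls
   below and in s_ltorg.  */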
2290
2291 static enum mstate mapstate = MAP_UNDEFINED;
2292
2293 void
2294 mapping_state (enum mstate state)
2295 {
2296 symbolS * symbolP;
2297 const char * symname;
2298 int type;
2299
2300 if (mapstate == state)
2301 /* The mapping symbol has already been emitted.
2302 There is nothing else to do. */
2303 return;
2304
2305 mapstate = state;
2306
2307 switch (state)
2308 {
2309 case MAP_DATA:
2310 symname = "$d";
2311 type = BSF_NO_FLAGS;
2312 break;
2313 case MAP_ARM:
2314 symname = "$a";
2315 type = BSF_NO_FLAGS;
2316 break;
2317 case MAP_THUMB:
2318 symname = "$t";
2319 type = BSF_NO_FLAGS;
2320 break;
2321 case MAP_UNDEFINED:
2322 return;
2323 default:
2324 abort ();
2325 }
2326
2327 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2328
2329 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2330 symbol_table_insert (symbolP);
2331 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2332
2333 switch (state)
2334 {
2335 case MAP_ARM:
2336 THUMB_SET_FUNC (symbolP, 0);
2337 ARM_SET_THUMB (symbolP, 0);
2338 ARM_SET_INTERWORK (symbolP, support_interwork);
2339 break;
2340
2341 case MAP_THUMB:
2342 THUMB_SET_FUNC (symbolP, 1);
2343 ARM_SET_THUMB (symbolP, 1);
2344 ARM_SET_INTERWORK (symbolP, support_interwork);
2345 break;
2346
2347 case MAP_DATA:
2348 default:
2349 return;
2350 }
2351 }
2352 #else
2353 #define mapping_state(x) /* nothing */
2354 #endif
2355
2356 /* Find the real, Thumb encoded start of a Thumb function. */
2357
2358 static symbolS *
2359 find_real_start (symbolS * symbolP)
2360 {
2361 char * real_start;
2362 const char * name = S_GET_NAME (symbolP);
2363 symbolS * new_target;
2364
2365 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2366 #define STUB_NAME ".real_start_of"
2367
2368 if (name == NULL)
2369 abort ();
2370
2371 /* The compiler may generate BL instructions to local labels because
2372 it needs to perform a branch to a far away location. These labels
2373 do not have a corresponding ".real_start_of" label. We check
2374 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2375 the ".real_start_of" convention for nonlocal branches. */
2376 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2377 return symbolP;
2378
2379 real_start = ACONCAT ((STUB_NAME, name, NULL));
2380 new_target = symbol_find (real_start);
2381
2382 if (new_target == NULL)
2383 {
2384       as_warn (_("Failed to find real start of function: %s\n"), name);
2385 new_target = symbolP;
2386 }
2387
2388 return new_target;
2389 }
2390
2391 static void
2392 opcode_select (int width)
2393 {
2394 switch (width)
2395 {
2396 case 16:
2397 if (! thumb_mode)
2398 {
2399 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2400 as_bad (_("selected processor does not support THUMB opcodes"));
2401
2402 thumb_mode = 1;
2403 /* No need to force the alignment, since we will have been
2404 coming from ARM mode, which is word-aligned. */
2405 record_alignment (now_seg, 1);
2406 }
2407 mapping_state (MAP_THUMB);
2408 break;
2409
2410 case 32:
2411 if (thumb_mode)
2412 {
2413 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2414 as_bad (_("selected processor does not support ARM opcodes"));
2415
2416 thumb_mode = 0;
2417
2418 if (!need_pass_2)
2419 frag_align (2, 0, 0);
2420
2421 record_alignment (now_seg, 1);
2422 }
2423 mapping_state (MAP_ARM);
2424 break;
2425
2426 default:
2427 as_bad (_("invalid instruction size selected (%d)"), width);
2428 }
2429 }
2430
2431 static void
2432 s_arm (int ignore ATTRIBUTE_UNUSED)
2433 {
2434 opcode_select (32);
2435 demand_empty_rest_of_line ();
2436 }
2437
2438 static void
2439 s_thumb (int ignore ATTRIBUTE_UNUSED)
2440 {
2441 opcode_select (16);
2442 demand_empty_rest_of_line ();
2443 }
2444
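/* Implement the .code directive: ".code 16" selects Thumb, ".code 32"
   selects ARM.  */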
2445 static void
2446 s_code (int unused ATTRIBUTE_UNUSED)
2447 {
2448 int temp;
2449
2450 temp = get_absolute_expression ();
2451 switch (temp)
2452 {
2453 case 16:
2454 case 32:
2455 opcode_select (temp);
2456 break;
2457
2458 default:
2459 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2460 }
2461 }
2462
2463 static void
2464 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2465 {
2466 /* If we are not already in thumb mode go into it, EVEN if
2467 the target processor does not support thumb instructions.
2468 This is used by gcc/config/arm/lib1funcs.asm for example
2469 to compile interworking support functions even if the
2470 target processor should not support interworking. */
2471 if (! thumb_mode)
2472 {
2473 thumb_mode = 2;
2474 record_alignment (now_seg, 1);
2475 }
2476
2477 demand_empty_rest_of_line ();
2478 }
2479
2480 static void
2481 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2482 {
2483 s_thumb (0);
2484
2485 /* The following label is the name/address of the start of a Thumb function.
2486 We need to know this for the interworking support. */
2487 label_is_thumb_function_name = TRUE;
2488 }
2489
2490 /* Perform a .set directive, but also mark the alias as
2491 being a thumb function. */
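/* Typical use (illustrative):

       .thumb_set	alias, thumb_fn

   which behaves like ".set alias, thumb_fn" plus the Thumb/interworking
   marking.  */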
2492
2493 static void
2494 s_thumb_set (int equiv)
2495 {
2496 /* XXX the following is a duplicate of the code for s_set() in read.c
2497 We cannot just call that code as we need to get at the symbol that
2498 is created. */
2499 char * name;
2500 char delim;
2501 char * end_name;
2502 symbolS * symbolP;
2503
2504 /* Especial apologies for the random logic:
2505 This just grew, and could be parsed much more simply!
2506 Dean - in haste. */
2507 name = input_line_pointer;
2508 delim = get_symbol_end ();
2509 end_name = input_line_pointer;
2510 *end_name = delim;
2511
2512 if (*input_line_pointer != ',')
2513 {
2514 *end_name = 0;
2515 as_bad (_("expected comma after name \"%s\""), name);
2516 *end_name = delim;
2517 ignore_rest_of_line ();
2518 return;
2519 }
2520
2521 input_line_pointer++;
2522 *end_name = 0;
2523
2524 if (name[0] == '.' && name[1] == '\0')
2525 {
2526 /* XXX - this should not happen to .thumb_set. */
2527 abort ();
2528 }
2529
2530 if ((symbolP = symbol_find (name)) == NULL
2531 && (symbolP = md_undefined_symbol (name)) == NULL)
2532 {
2533 #ifndef NO_LISTING
2534 /* When doing symbol listings, play games with dummy fragments living
2535 outside the normal fragment chain to record the file and line info
2536 for this symbol. */
2537 if (listing & LISTING_SYMBOLS)
2538 {
2539 extern struct list_info_struct * listing_tail;
2540 fragS * dummy_frag = xmalloc (sizeof (fragS));
2541
2542 memset (dummy_frag, 0, sizeof (fragS));
2543 dummy_frag->fr_type = rs_fill;
2544 dummy_frag->line = listing_tail;
2545 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2546 dummy_frag->fr_symbol = symbolP;
2547 }
2548 else
2549 #endif
2550 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2551
2552 #ifdef OBJ_COFF
2553 /* "set" symbols are local unless otherwise specified. */
2554 SF_SET_LOCAL (symbolP);
2555 #endif /* OBJ_COFF */
2556 } /* Make a new symbol. */
2557
2558 symbol_table_insert (symbolP);
2559
2560 * end_name = delim;
2561
2562 if (equiv
2563 && S_IS_DEFINED (symbolP)
2564 && S_GET_SEGMENT (symbolP) != reg_section)
2565 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2566
2567 pseudo_set (symbolP);
2568
2569 demand_empty_rest_of_line ();
2570
2571 /* XXX Now we come to the Thumb specific bit of code. */
2572
2573 THUMB_SET_FUNC (symbolP, 1);
2574 ARM_SET_THUMB (symbolP, 1);
2575 #if defined OBJ_ELF || defined OBJ_COFF
2576 ARM_SET_INTERWORK (symbolP, support_interwork);
2577 #endif
2578 }
2579
2580 /* Directives: Mode selection. */
2581
2582 /* .syntax [unified|divided] - choose the new unified syntax
2583 (same for Arm and Thumb encoding, modulo slight differences in what
2584 can be represented) or the old divergent syntax for each mode. */
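/* e.g. ".syntax unified" or ".syntax divided".  */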
2585 static void
2586 s_syntax (int unused ATTRIBUTE_UNUSED)
2587 {
2588 char *name, delim;
2589
2590 name = input_line_pointer;
2591 delim = get_symbol_end ();
2592
2593 if (!strcasecmp (name, "unified"))
2594 unified_syntax = TRUE;
2595 else if (!strcasecmp (name, "divided"))
2596 unified_syntax = FALSE;
2597 else
2598 {
2599 as_bad (_("unrecognized syntax mode \"%s\""), name);
2600 return;
2601 }
2602 *input_line_pointer = delim;
2603 demand_empty_rest_of_line ();
2604 }
2605
2606 /* Directives: sectioning and alignment. */
2607
2608 /* Same as s_align_ptwo but align 0 => align 2. */
2609
2610 static void
2611 s_align (int unused ATTRIBUTE_UNUSED)
2612 {
2613 int temp;
2614 long temp_fill;
2615 long max_alignment = 15;
2616
2617 temp = get_absolute_expression ();
2618 if (temp > max_alignment)
2619 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2620 else if (temp < 0)
2621 {
2622 as_bad (_("alignment negative. 0 assumed."));
2623 temp = 0;
2624 }
2625
2626 if (*input_line_pointer == ',')
2627 {
2628 input_line_pointer++;
2629 temp_fill = get_absolute_expression ();
2630 }
2631 else
2632 temp_fill = 0;
2633
2634 if (!temp)
2635 temp = 2;
2636
2637 /* Only make a frag if we HAVE to. */
2638 if (temp && !need_pass_2)
2639 frag_align (temp, (int) temp_fill, 0);
2640 demand_empty_rest_of_line ();
2641
2642 record_alignment (now_seg, temp);
2643 }
2644
2645 static void
2646 s_bss (int ignore ATTRIBUTE_UNUSED)
2647 {
2648 /* We don't support putting frags in the BSS segment, we fake it by
2649 marking in_bss, then looking at s_skip for clues. */
2650 subseg_set (bss_section, 0);
2651 demand_empty_rest_of_line ();
2652 mapping_state (MAP_DATA);
2653 }
2654
2655 static void
2656 s_even (int ignore ATTRIBUTE_UNUSED)
2657 {
2658   /* Never make a frag if we expect an extra pass.  */
2659 if (!need_pass_2)
2660 frag_align (1, 0, 0);
2661
2662 record_alignment (now_seg, 1);
2663
2664 demand_empty_rest_of_line ();
2665 }
2666
2667 /* Directives: Literal pools. */
2668
2669 static literal_pool *
2670 find_literal_pool (void)
2671 {
2672 literal_pool * pool;
2673
2674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2675 {
2676 if (pool->section == now_seg
2677 && pool->sub_section == now_subseg)
2678 break;
2679 }
2680
2681 return pool;
2682 }
2683
2684 static literal_pool *
2685 find_or_make_literal_pool (void)
2686 {
2687 /* Next literal pool ID number. */
2688 static unsigned int latest_pool_num = 1;
2689 literal_pool * pool;
2690
2691 pool = find_literal_pool ();
2692
2693 if (pool == NULL)
2694 {
2695 /* Create a new pool. */
2696 pool = xmalloc (sizeof (* pool));
2697 if (! pool)
2698 return NULL;
2699
2700 pool->next_free_entry = 0;
2701 pool->section = now_seg;
2702 pool->sub_section = now_subseg;
2703 pool->next = list_of_pools;
2704 pool->symbol = NULL;
2705
2706 /* Add it to the list. */
2707 list_of_pools = pool;
2708 }
2709
2710 /* New pools, and emptied pools, will have a NULL symbol. */
2711 if (pool->symbol == NULL)
2712 {
2713 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2714 (valueT) 0, &zero_address_frag);
2715 pool->id = latest_pool_num ++;
2716 }
2717
2718 /* Done. */
2719 return pool;
2720 }
2721
2722 /* Add the literal in the global 'inst'
2723    structure to the relevant literal pool.  */
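/* For example, "ldr r0, =0x12345678" places 0x12345678 in the current pool
   via this function, and the instruction is fixed up to load it with a
   pool-relative reference once ".ltorg"/".pool" flushes the pool
   (illustrative; see s_ltorg below).  */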
2724
2725 static int
2726 add_to_lit_pool (void)
2727 {
2728 literal_pool * pool;
2729 unsigned int entry;
2730
2731 pool = find_or_make_literal_pool ();
2732
2733 /* Check if this literal value is already in the pool. */
2734 for (entry = 0; entry < pool->next_free_entry; entry ++)
2735 {
2736 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2737 && (inst.reloc.exp.X_op == O_constant)
2738 && (pool->literals[entry].X_add_number
2739 == inst.reloc.exp.X_add_number)
2740 && (pool->literals[entry].X_unsigned
2741 == inst.reloc.exp.X_unsigned))
2742 break;
2743
2744 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2745 && (inst.reloc.exp.X_op == O_symbol)
2746 && (pool->literals[entry].X_add_number
2747 == inst.reloc.exp.X_add_number)
2748 && (pool->literals[entry].X_add_symbol
2749 == inst.reloc.exp.X_add_symbol)
2750 && (pool->literals[entry].X_op_symbol
2751 == inst.reloc.exp.X_op_symbol))
2752 break;
2753 }
2754
2755 /* Do we need to create a new entry? */
2756 if (entry == pool->next_free_entry)
2757 {
2758 if (entry >= MAX_LITERAL_POOL_SIZE)
2759 {
2760 inst.error = _("literal pool overflow");
2761 return FAIL;
2762 }
2763
2764 pool->literals[entry] = inst.reloc.exp;
2765 pool->next_free_entry += 1;
2766 }
2767
2768 inst.reloc.exp.X_op = O_symbol;
2769 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2770 inst.reloc.exp.X_add_symbol = pool->symbol;
2771
2772 return SUCCESS;
2773 }
2774
2775 /* Can't use symbol_new here, so have to create a symbol and then at
2776    a later date assign it a value.  That's what these functions do.  */
2777
2778 static void
2779 symbol_locate (symbolS * symbolP,
2780 const char * name, /* It is copied, the caller can modify. */
2781 segT segment, /* Segment identifier (SEG_<something>). */
2782 valueT valu, /* Symbol value. */
2783 fragS * frag) /* Associated fragment. */
2784 {
2785 unsigned int name_length;
2786 char * preserved_copy_of_name;
2787
2788 name_length = strlen (name) + 1; /* +1 for \0. */
2789 obstack_grow (&notes, name, name_length);
2790 preserved_copy_of_name = obstack_finish (&notes);
2791
2792 #ifdef tc_canonicalize_symbol_name
2793 preserved_copy_of_name =
2794 tc_canonicalize_symbol_name (preserved_copy_of_name);
2795 #endif
2796
2797 S_SET_NAME (symbolP, preserved_copy_of_name);
2798
2799 S_SET_SEGMENT (symbolP, segment);
2800 S_SET_VALUE (symbolP, valu);
2801 symbol_clear_list_pointers (symbolP);
2802
2803 symbol_set_frag (symbolP, frag);
2804
2805 /* Link to end of symbol chain. */
2806 {
2807 extern int symbol_table_frozen;
2808
2809 if (symbol_table_frozen)
2810 abort ();
2811 }
2812
2813 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2814
2815 obj_symbol_new_hook (symbolP);
2816
2817 #ifdef tc_symbol_new_hook
2818 tc_symbol_new_hook (symbolP);
2819 #endif
2820
2821 #ifdef DEBUG_SYMS
2822 verify_symbol_chain (symbol_rootP, symbol_lastP);
2823 #endif /* DEBUG_SYMS */
2824 }
2825
2826
2827 static void
2828 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2829 {
2830 unsigned int entry;
2831 literal_pool * pool;
2832 char sym_name[20];
2833
2834 pool = find_literal_pool ();
2835 if (pool == NULL
2836 || pool->symbol == NULL
2837 || pool->next_free_entry == 0)
2838 return;
2839
2840 mapping_state (MAP_DATA);
2841
2842   /* Align the pool for word accesses.
2843 Only make a frag if we have to. */
2844 if (!need_pass_2)
2845 frag_align (2, 0, 0);
2846
2847 record_alignment (now_seg, 2);
2848
2849 sprintf (sym_name, "$$lit_\002%x", pool->id);
2850
2851 symbol_locate (pool->symbol, sym_name, now_seg,
2852 (valueT) frag_now_fix (), frag_now);
2853 symbol_table_insert (pool->symbol);
2854
2855 ARM_SET_THUMB (pool->symbol, thumb_mode);
2856
2857 #if defined OBJ_COFF || defined OBJ_ELF
2858 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2859 #endif
2860
2861 for (entry = 0; entry < pool->next_free_entry; entry ++)
2862 /* First output the expression in the instruction to the pool. */
2863 emit_expr (&(pool->literals[entry]), 4); /* .word */
2864
2865 /* Mark the pool as empty. */
2866 pool->next_free_entry = 0;
2867 pool->symbol = NULL;
2868 }
2869
2870 #ifdef OBJ_ELF
2871 /* Forward declarations for functions below, in the MD interface
2872 section. */
2873 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2874 static valueT create_unwind_entry (int);
2875 static void start_unwind_section (const segT, int);
2876 static void add_unwind_opcode (valueT, int);
2877 static void flush_pending_unwind (void);
2878
2879 /* Directives: Data. */
2880
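/* .word/.long accept an optional relocation suffix on symbolic operands,
   e.g. (illustrative; the accepted suffix names live in arm_reloc_hsh):

       .word	sym(got)  */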
2881 static void
2882 s_arm_elf_cons (int nbytes)
2883 {
2884 expressionS exp;
2885
2886 #ifdef md_flush_pending_output
2887 md_flush_pending_output ();
2888 #endif
2889
2890 if (is_it_end_of_statement ())
2891 {
2892 demand_empty_rest_of_line ();
2893 return;
2894 }
2895
2896 #ifdef md_cons_align
2897 md_cons_align (nbytes);
2898 #endif
2899
2900 mapping_state (MAP_DATA);
2901 do
2902 {
2903 int reloc;
2904 char *base = input_line_pointer;
2905
2906 expression (& exp);
2907
2908 if (exp.X_op != O_symbol)
2909 emit_expr (&exp, (unsigned int) nbytes);
2910 else
2911 {
2912 char *before_reloc = input_line_pointer;
2913 reloc = parse_reloc (&input_line_pointer);
2914 if (reloc == -1)
2915 {
2916 as_bad (_("unrecognized relocation suffix"));
2917 ignore_rest_of_line ();
2918 return;
2919 }
2920 else if (reloc == BFD_RELOC_UNUSED)
2921 emit_expr (&exp, (unsigned int) nbytes);
2922 else
2923 {
2924 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2925 int size = bfd_get_reloc_size (howto);
2926
2927 if (reloc == BFD_RELOC_ARM_PLT32)
2928 {
2929 as_bad (_("(plt) is only valid on branch targets"));
2930 reloc = BFD_RELOC_UNUSED;
2931 size = 0;
2932 }
2933
2934 if (size > nbytes)
2935 as_bad (_("%s relocations do not fit in %d bytes"),
2936 howto->name, nbytes);
2937 else
2938 {
2939 /* We've parsed an expression stopping at O_symbol.
2940 But there may be more expression left now that we
2941 have parsed the relocation marker. Parse it again.
2942 XXX Surely there is a cleaner way to do this. */
2943 char *p = input_line_pointer;
2944 int offset;
2945 char *save_buf = alloca (input_line_pointer - base);
2946 memcpy (save_buf, base, input_line_pointer - base);
2947 memmove (base + (input_line_pointer - before_reloc),
2948 base, before_reloc - base);
2949
2950 input_line_pointer = base + (input_line_pointer-before_reloc);
2951 expression (&exp);
2952 memcpy (base, save_buf, p - base);
2953
2954 offset = nbytes - size;
2955 p = frag_more ((int) nbytes);
2956 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2957 size, &exp, 0, reloc);
2958 }
2959 }
2960 }
2961 }
2962 while (*input_line_pointer++ == ',');
2963
2964 /* Put terminator back into stream. */
2965 input_line_pointer --;
2966 demand_empty_rest_of_line ();
2967 }
2968
2969
2970 /* Parse a .rel31 directive. */
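/* The expected form is ".rel31 <0|1>, <expression>", e.g. (illustrative):

       .rel31	0, handler

   which emits a 32-bit word whose low 31 bits get a PREL31-style fixup
   against "handler" and whose bit 31 comes from the flag.  */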
2971
2972 static void
2973 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2974 {
2975 expressionS exp;
2976 char *p;
2977 valueT highbit;
2978
2979 highbit = 0;
2980 if (*input_line_pointer == '1')
2981 highbit = 0x80000000;
2982 else if (*input_line_pointer != '0')
2983 as_bad (_("expected 0 or 1"));
2984
2985 input_line_pointer++;
2986 if (*input_line_pointer != ',')
2987 as_bad (_("missing comma"));
2988 input_line_pointer++;
2989
2990 #ifdef md_flush_pending_output
2991 md_flush_pending_output ();
2992 #endif
2993
2994 #ifdef md_cons_align
2995 md_cons_align (4);
2996 #endif
2997
2998 mapping_state (MAP_DATA);
2999
3000 expression (&exp);
3001
3002 p = frag_more (4);
3003 md_number_to_chars (p, highbit, 4);
3004 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3005 BFD_RELOC_ARM_PREL31);
3006
3007 demand_empty_rest_of_line ();
3008 }
3009
3010 /* Directives: AEABI stack-unwind tables. */
3011
3012 /* Parse an unwind_fnstart directive. Simply records the current location. */
3013
3014 static void
3015 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3016 {
3017 demand_empty_rest_of_line ();
3018 /* Mark the start of the function. */
3019 unwind.proc_start = expr_build_dot ();
3020
3021 /* Reset the rest of the unwind info. */
3022 unwind.opcode_count = 0;
3023 unwind.table_entry = NULL;
3024 unwind.personality_routine = NULL;
3025 unwind.personality_index = -1;
3026 unwind.frame_size = 0;
3027 unwind.fp_offset = 0;
3028 unwind.fp_reg = 13;
3029 unwind.fp_used = 0;
3030 unwind.sp_restored = 0;
3031 }
3032
3033
3034 /* Parse a handlerdata directive. Creates the exception handling table entry
3035 for the function. */
3036
3037 static void
3038 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3039 {
3040 demand_empty_rest_of_line ();
3041 if (unwind.table_entry)
3042     as_bad (_("duplicate .handlerdata directive"));
3043
3044 create_unwind_entry (1);
3045 }
3046
3047 /* Parse an unwind_fnend directive. Generates the index table entry. */
3048
3049 static void
3050 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3051 {
3052 long where;
3053 char *ptr;
3054 valueT val;
3055
3056 demand_empty_rest_of_line ();
3057
3058 /* Add eh table entry. */
3059 if (unwind.table_entry == NULL)
3060 val = create_unwind_entry (0);
3061 else
3062 val = 0;
3063
3064 /* Add index table entry. This is two words. */
3065 start_unwind_section (unwind.saved_seg, 1);
3066 frag_align (2, 0, 0);
3067 record_alignment (now_seg, 2);
3068
3069 ptr = frag_more (8);
3070 where = frag_now_fix () - 8;
3071
3072 /* Self relative offset of the function start. */
3073 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3074 BFD_RELOC_ARM_PREL31);
3075
3076 /* Indicate dependency on EHABI-defined personality routines to the
3077 linker, if it hasn't been done already. */
3078 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3079 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3080 {
3081 static const char *const name[] = {
3082 "__aeabi_unwind_cpp_pr0",
3083 "__aeabi_unwind_cpp_pr1",
3084 "__aeabi_unwind_cpp_pr2"
3085 };
3086 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3087 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3088 marked_pr_dependency |= 1 << unwind.personality_index;
3089 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3090 = marked_pr_dependency;
3091 }
3092
3093 if (val)
3094 /* Inline exception table entry. */
3095 md_number_to_chars (ptr + 4, val, 4);
3096 else
3097 /* Self relative offset of the table entry. */
3098 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3099 BFD_RELOC_ARM_PREL31);
3100
3101 /* Restore the original section. */
3102 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3103 }
3104
3105
3106 /* Parse an unwind_cantunwind directive. */
3107
3108 static void
3109 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3110 {
3111 demand_empty_rest_of_line ();
3112 if (unwind.personality_routine || unwind.personality_index != -1)
3113 as_bad (_("personality routine specified for cantunwind frame"));
3114
3115 unwind.personality_index = -2;
3116 }
3117
3118
3119 /* Parse a personalityindex directive. */
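/* e.g. ".personalityindex 1" (the operand must be a constant 0-15).  */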
3120
3121 static void
3122 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3123 {
3124 expressionS exp;
3125
3126 if (unwind.personality_routine || unwind.personality_index != -1)
3127 as_bad (_("duplicate .personalityindex directive"));
3128
3129 expression (&exp);
3130
3131 if (exp.X_op != O_constant
3132 || exp.X_add_number < 0 || exp.X_add_number > 15)
3133 {
3134 as_bad (_("bad personality routine number"));
3135 ignore_rest_of_line ();
3136 return;
3137 }
3138
3139 unwind.personality_index = exp.X_add_number;
3140
3141 demand_empty_rest_of_line ();
3142 }
3143
3144
3145 /* Parse a personality directive. */
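/* e.g. ".personality __gxx_personality_v0" (illustrative symbol name).  */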
3146
3147 static void
3148 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3149 {
3150 char *name, *p, c;
3151
3152 if (unwind.personality_routine || unwind.personality_index != -1)
3153 as_bad (_("duplicate .personality directive"));
3154
3155 name = input_line_pointer;
3156 c = get_symbol_end ();
3157 p = input_line_pointer;
3158 unwind.personality_routine = symbol_find_or_make (name);
3159 *p = c;
3160 demand_empty_rest_of_line ();
3161 }
3162
3163
3164 /* Parse a directive saving core registers. */
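/* e.g. ".save {r4-r7, lr}" (illustrative register list).  */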
3165
3166 static void
3167 s_arm_unwind_save_core (void)
3168 {
3169 valueT op;
3170 long range;
3171 int n;
3172
3173 range = parse_reg_list (&input_line_pointer);
3174 if (range == FAIL)
3175 {
3176 as_bad (_("expected register list"));
3177 ignore_rest_of_line ();
3178 return;
3179 }
3180
3181 demand_empty_rest_of_line ();
3182
3183 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3184 into .unwind_save {..., sp...}. We aren't bothered about the value of
3185 ip because it is clobbered by calls. */
3186 if (unwind.sp_restored && unwind.fp_reg == 12
3187 && (range & 0x3000) == 0x1000)
3188 {
3189 unwind.opcode_count--;
3190 unwind.sp_restored = 0;
3191 range = (range | 0x2000) & ~0x1000;
3192 unwind.pending_offset = 0;
3193 }
3194
3195 /* Pop r4-r15. */
3196 if (range & 0xfff0)
3197 {
3198 /* See if we can use the short opcodes. These pop a block of up to 8
3199 registers starting with r4, plus maybe r14. */
3200 for (n = 0; n < 8; n++)
3201 {
3202 /* Break at the first non-saved register. */
3203 if ((range & (1 << (n + 4))) == 0)
3204 break;
3205 }
3206 /* See if there are any other bits set. */
3207 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3208 {
3209 /* Use the long form. */
3210 op = 0x8000 | ((range >> 4) & 0xfff);
3211 add_unwind_opcode (op, 2);
3212 }
3213 else
3214 {
3215 /* Use the short form. */
3216 if (range & 0x4000)
3217 op = 0xa8; /* Pop r14. */
3218 else
3219 op = 0xa0; /* Do not pop r14. */
3220 op |= (n - 1);
3221 add_unwind_opcode (op, 1);
3222 }
3223 }
3224
3225 /* Pop r0-r3. */
3226 if (range & 0xf)
3227 {
3228 op = 0xb100 | (range & 0xf);
3229 add_unwind_opcode (op, 2);
3230 }
3231
3232 /* Record the number of bytes pushed. */
3233 for (n = 0; n < 16; n++)
3234 {
3235 if (range & (1 << n))
3236 unwind.frame_size += 4;
3237 }
3238 }
3239
3240
3241 /* Parse a directive saving FPA registers. */
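/* e.g. ".save f4, 2" -- an FPA register followed by a register count
   (illustrative).  */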
3242
3243 static void
3244 s_arm_unwind_save_fpa (int reg)
3245 {
3246 expressionS exp;
3247 int num_regs;
3248 valueT op;
3249
3250   /* Get the number of registers to transfer.  */
3251 if (skip_past_comma (&input_line_pointer) != FAIL)
3252 expression (&exp);
3253 else
3254 exp.X_op = O_illegal;
3255
3256 if (exp.X_op != O_constant)
3257 {
3258 as_bad (_("expected , <constant>"));
3259 ignore_rest_of_line ();
3260 return;
3261 }
3262
3263 num_regs = exp.X_add_number;
3264
3265 if (num_regs < 1 || num_regs > 4)
3266 {
3267 as_bad (_("number of registers must be in the range [1:4]"));
3268 ignore_rest_of_line ();
3269 return;
3270 }
3271
3272 demand_empty_rest_of_line ();
3273
3274 if (reg == 4)
3275 {
3276 /* Short form. */
3277 op = 0xb4 | (num_regs - 1);
3278 add_unwind_opcode (op, 1);
3279 }
3280 else
3281 {
3282 /* Long form. */
3283 op = 0xc800 | (reg << 4) | (num_regs - 1);
3284 add_unwind_opcode (op, 2);
3285 }
3286 unwind.frame_size += num_regs * 12;
3287 }
3288
3289
3290 /* Parse a directive saving VFP registers for ARMv6 and above. */
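/* e.g. ".vsave {d8-d15}" (illustrative; emits FSTMD/FLDMD-style unwind
   opcodes).  */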
3291
3292 static void
3293 s_arm_unwind_save_vfp_armv6 (void)
3294 {
3295 int count;
3296 unsigned int start;
3297 valueT op;
3298 int num_vfpv3_regs = 0;
3299 int num_regs_below_16;
3300
3301 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3302 if (count == FAIL)
3303 {
3304 as_bad (_("expected register list"));
3305 ignore_rest_of_line ();
3306 return;
3307 }
3308
3309 demand_empty_rest_of_line ();
3310
3311 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3312 than FSTMX/FLDMX-style ones). */
3313
3314 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3315 if (start >= 16)
3316 num_vfpv3_regs = count;
3317 else if (start + count > 16)
3318 num_vfpv3_regs = start + count - 16;
3319
3320 if (num_vfpv3_regs > 0)
3321 {
3322 int start_offset = start > 16 ? start - 16 : 0;
3323 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3324 add_unwind_opcode (op, 2);
3325 }
3326
3327 /* Generate opcode for registers numbered in the range 0 .. 15. */
3328 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3329 assert (num_regs_below_16 + num_vfpv3_regs == count);
3330 if (num_regs_below_16 > 0)
3331 {
3332 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3333 add_unwind_opcode (op, 2);
3334 }
3335
3336 unwind.frame_size += count * 8;
3337 }
3338
3339
3340 /* Parse a directive saving VFP registers for pre-ARMv6. */
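/* e.g. ".save {d8-d11}" (illustrative; emits FSTMX/FLDMX-style unwind
   opcodes).  */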
3341
3342 static void
3343 s_arm_unwind_save_vfp (void)
3344 {
3345 int count;
3346 unsigned int reg;
3347 valueT op;
3348
3349 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3350 if (count == FAIL)
3351 {
3352 as_bad (_("expected register list"));
3353 ignore_rest_of_line ();
3354 return;
3355 }
3356
3357 demand_empty_rest_of_line ();
3358
3359 if (reg == 8)
3360 {
3361 /* Short form. */
3362 op = 0xb8 | (count - 1);
3363 add_unwind_opcode (op, 1);
3364 }
3365 else
3366 {
3367 /* Long form. */
3368 op = 0xb300 | (reg << 4) | (count - 1);
3369 add_unwind_opcode (op, 2);
3370 }
3371 unwind.frame_size += count * 8 + 4;
3372 }
3373
3374
3375 /* Parse a directive saving iWMMXt data registers. */
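/* e.g. ".save {wr10-wr12}" (illustrative register list).  */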
3376
3377 static void
3378 s_arm_unwind_save_mmxwr (void)
3379 {
3380 int reg;
3381 int hi_reg;
3382 int i;
3383 unsigned mask = 0;
3384 valueT op;
3385
3386 if (*input_line_pointer == '{')
3387 input_line_pointer++;
3388
3389 do
3390 {
3391 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3392
3393 if (reg == FAIL)
3394 {
3395 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3396 goto error;
3397 }
3398
3399 if (mask >> reg)
3400 as_tsktsk (_("register list not in ascending order"));
3401 mask |= 1 << reg;
3402
3403 if (*input_line_pointer == '-')
3404 {
3405 input_line_pointer++;
3406 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3407 if (hi_reg == FAIL)
3408 {
3409 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3410 goto error;
3411 }
3412 else if (reg >= hi_reg)
3413 {
3414 as_bad (_("bad register range"));
3415 goto error;
3416 }
3417 for (; reg < hi_reg; reg++)
3418 mask |= 1 << reg;
3419 }
3420 }
3421 while (skip_past_comma (&input_line_pointer) != FAIL);
3422
3423 if (*input_line_pointer == '}')
3424 input_line_pointer++;
3425
3426 demand_empty_rest_of_line ();
3427
3428 /* Generate any deferred opcodes because we're going to be looking at
3429 the list. */
3430 flush_pending_unwind ();
3431
3432 for (i = 0; i < 16; i++)
3433 {
3434 if (mask & (1 << i))
3435 unwind.frame_size += 8;
3436 }
3437
3438 /* Attempt to combine with a previous opcode. We do this because gcc
3439 likes to output separate unwind directives for a single block of
3440 registers. */
3441 if (unwind.opcode_count > 0)
3442 {
3443 i = unwind.opcodes[unwind.opcode_count - 1];
3444 if ((i & 0xf8) == 0xc0)
3445 {
3446 i &= 7;
3447 /* Only merge if the blocks are contiguous. */
3448 if (i < 6)
3449 {
3450 if ((mask & 0xfe00) == (1 << 9))
3451 {
3452 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3453 unwind.opcode_count--;
3454 }
3455 }
3456 else if (i == 6 && unwind.opcode_count >= 2)
3457 {
3458 i = unwind.opcodes[unwind.opcode_count - 2];
3459 reg = i >> 4;
3460 i &= 0xf;
3461
3462 op = 0xffff << (reg - 1);
3463 if (reg > 0
3464 && ((mask & op) == (1u << (reg - 1))))
3465 {
3466 op = (1 << (reg + i + 1)) - 1;
3467 op &= ~((1 << reg) - 1);
3468 mask |= op;
3469 unwind.opcode_count -= 2;
3470 }
3471 }
3472 }
3473 }
3474
3475 hi_reg = 15;
3476 /* We want to generate opcodes in the order the registers have been
3477      saved, i.e. descending order.  */
3478 for (reg = 15; reg >= -1; reg--)
3479 {
3480 /* Save registers in blocks. */
3481 if (reg < 0
3482 || !(mask & (1 << reg)))
3483 {
3484 /* We found an unsaved reg. Generate opcodes to save the
3485	     preceding block.  */
3486 if (reg != hi_reg)
3487 {
3488 if (reg == 9)
3489 {
3490 /* Short form. */
3491 op = 0xc0 | (hi_reg - 10);
3492 add_unwind_opcode (op, 1);
3493 }
3494 else
3495 {
3496 /* Long form. */
3497 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3498 add_unwind_opcode (op, 2);
3499 }
3500 }
3501 hi_reg = reg - 1;
3502 }
3503 }
3504
3505 return;
3506 error:
3507 ignore_rest_of_line ();
3508 }
3509
3510 static void
3511 s_arm_unwind_save_mmxwcg (void)
3512 {
3513 int reg;
3514 int hi_reg;
3515 unsigned mask = 0;
3516 valueT op;
3517
3518 if (*input_line_pointer == '{')
3519 input_line_pointer++;
3520
3521 do
3522 {
3523 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3524
3525 if (reg == FAIL)
3526 {
3527 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3528 goto error;
3529 }
3530
3531 reg -= 8;
3532 if (mask >> reg)
3533 as_tsktsk (_("register list not in ascending order"));
3534 mask |= 1 << reg;
3535
3536 if (*input_line_pointer == '-')
3537 {
3538 input_line_pointer++;
3539 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3540 if (hi_reg == FAIL)
3541 {
3542 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3543 goto error;
3544 }
3545 else if (reg >= hi_reg)
3546 {
3547 as_bad (_("bad register range"));
3548 goto error;
3549 }
3550 for (; reg < hi_reg; reg++)
3551 mask |= 1 << reg;
3552 }
3553 }
3554 while (skip_past_comma (&input_line_pointer) != FAIL);
3555
3556 if (*input_line_pointer == '}')
3557 input_line_pointer++;
3558
3559 demand_empty_rest_of_line ();
3560
3561 /* Generate any deferred opcodes because we're going to be looking at
3562 the list. */
3563 flush_pending_unwind ();
3564
3565 for (reg = 0; reg < 16; reg++)
3566 {
3567 if (mask & (1 << reg))
3568 unwind.frame_size += 4;
3569 }
3570 op = 0xc700 | mask;
3571 add_unwind_opcode (op, 2);
3572 return;
3573 error:
3574 ignore_rest_of_line ();
3575 }
3576
3577
3578 /* Parse an unwind_save directive.
3579 If the argument is non-zero, this is a .vsave directive. */
3580
3581 static void
3582 s_arm_unwind_save (int arch_v6)
3583 {
3584 char *peek;
3585 struct reg_entry *reg;
3586 bfd_boolean had_brace = FALSE;
3587
3588 /* Figure out what sort of save we have. */
3589 peek = input_line_pointer;
3590
3591 if (*peek == '{')
3592 {
3593 had_brace = TRUE;
3594 peek++;
3595 }
3596
3597 reg = arm_reg_parse_multi (&peek);
3598
3599 if (!reg)
3600 {
3601 as_bad (_("register expected"));
3602 ignore_rest_of_line ();
3603 return;
3604 }
3605
3606 switch (reg->type)
3607 {
3608 case REG_TYPE_FN:
3609 if (had_brace)
3610 {
3611 as_bad (_("FPA .unwind_save does not take a register list"));
3612 ignore_rest_of_line ();
3613 return;
3614 }
3615 s_arm_unwind_save_fpa (reg->number);
3616 return;
3617
3618 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3619 case REG_TYPE_VFD:
3620 if (arch_v6)
3621 s_arm_unwind_save_vfp_armv6 ();
3622 else
3623 s_arm_unwind_save_vfp ();
3624 return;
3625 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3626 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3627
3628 default:
3629 as_bad (_(".unwind_save does not support this kind of register"));
3630 ignore_rest_of_line ();
3631 }
3632 }
3633
3634
3635 /* Parse an unwind_movsp directive. */
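/* e.g. ".movsp ip" or ".movsp ip, #8" (illustrative).  */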
3636
3637 static void
3638 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3639 {
3640 int reg;
3641 valueT op;
3642 int offset;
3643
3644 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3645 if (reg == FAIL)
3646 {
3647 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3648 ignore_rest_of_line ();
3649 return;
3650 }
3651
3652 /* Optional constant. */
3653 if (skip_past_comma (&input_line_pointer) != FAIL)
3654 {
3655 if (immediate_for_directive (&offset) == FAIL)
3656 return;
3657 }
3658 else
3659 offset = 0;
3660
3661 demand_empty_rest_of_line ();
3662
3663 if (reg == REG_SP || reg == REG_PC)
3664 {
3665 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3666 return;
3667 }
3668
3669 if (unwind.fp_reg != REG_SP)
3670 as_bad (_("unexpected .unwind_movsp directive"));
3671
3672 /* Generate opcode to restore the value. */
3673 op = 0x90 | reg;
3674 add_unwind_opcode (op, 1);
3675
3676 /* Record the information for later. */
3677 unwind.fp_reg = reg;
3678 unwind.fp_offset = unwind.frame_size - offset;
3679 unwind.sp_restored = 1;
3680 }
3681
3682 /* Parse an unwind_pad directive. */
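/* e.g. ".pad #12" (the operand must be a multiple of 4).  */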
3683
3684 static void
3685 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3686 {
3687 int offset;
3688
3689 if (immediate_for_directive (&offset) == FAIL)
3690 return;
3691
3692 if (offset & 3)
3693 {
3694 as_bad (_("stack increment must be multiple of 4"));
3695 ignore_rest_of_line ();
3696 return;
3697 }
3698
3699 /* Don't generate any opcodes, just record the details for later. */
3700 unwind.frame_size += offset;
3701 unwind.pending_offset += offset;
3702
3703 demand_empty_rest_of_line ();
3704 }
3705
3706 /* Parse an unwind_setfp directive. */
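/* e.g. ".setfp fp, sp" or ".setfp fp, sp, #8" (illustrative).  */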
3707
3708 static void
3709 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 int sp_reg;
3712 int fp_reg;
3713 int offset;
3714
3715 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3716 if (skip_past_comma (&input_line_pointer) == FAIL)
3717 sp_reg = FAIL;
3718 else
3719 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3720
3721 if (fp_reg == FAIL || sp_reg == FAIL)
3722 {
3723 as_bad (_("expected <reg>, <reg>"));
3724 ignore_rest_of_line ();
3725 return;
3726 }
3727
3728 /* Optional constant. */
3729 if (skip_past_comma (&input_line_pointer) != FAIL)
3730 {
3731 if (immediate_for_directive (&offset) == FAIL)
3732 return;
3733 }
3734 else
3735 offset = 0;
3736
3737 demand_empty_rest_of_line ();
3738
3739 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3740 {
3741       as_bad (_("register must be either sp or set by a previous "
3742		  "unwind_movsp directive"));
3743 return;
3744 }
3745
3746 /* Don't generate any opcodes, just record the information for later. */
3747 unwind.fp_reg = fp_reg;
3748 unwind.fp_used = 1;
3749 if (sp_reg == 13)
3750 unwind.fp_offset = unwind.frame_size - offset;
3751 else
3752 unwind.fp_offset -= offset;
3753 }
3754
3755 /* Parse an unwind_raw directive. */
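/* e.g. ".unwind_raw 4, 0xb1, 0x01" -- a frame-size adjustment followed by
   raw unwind opcode bytes (illustrative; 0xb1 0x01 is the "pop {r0}"
   opcode generated elsewhere in this file).  */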
3756
3757 static void
3758 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3759 {
3760 expressionS exp;
3761 /* This is an arbitrary limit. */
3762 unsigned char op[16];
3763 int count;
3764
3765 expression (&exp);
3766 if (exp.X_op == O_constant
3767 && skip_past_comma (&input_line_pointer) != FAIL)
3768 {
3769 unwind.frame_size += exp.X_add_number;
3770 expression (&exp);
3771 }
3772 else
3773 exp.X_op = O_illegal;
3774
3775 if (exp.X_op != O_constant)
3776 {
3777 as_bad (_("expected <offset>, <opcode>"));
3778 ignore_rest_of_line ();
3779 return;
3780 }
3781
3782 count = 0;
3783
3784 /* Parse the opcode. */
3785 for (;;)
3786 {
3787 if (count >= 16)
3788 {
3789 as_bad (_("unwind opcode too long"));
3790	  ignore_rest_of_line ();
	  return;
3791	}
3792 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3793 {
3794 as_bad (_("invalid unwind opcode"));
3795 ignore_rest_of_line ();
3796 return;
3797 }
3798 op[count++] = exp.X_add_number;
3799
3800 /* Parse the next byte. */
3801 if (skip_past_comma (&input_line_pointer) == FAIL)
3802 break;
3803
3804 expression (&exp);
3805 }
3806
3807 /* Add the opcode bytes in reverse order. */
3808 while (count--)
3809 add_unwind_opcode (op[count], 1);
3810
3811 demand_empty_rest_of_line ();
3812 }
3813
3814
3815 /* Parse a .eabi_attribute directive. */
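/* The general form is ".eabi_attribute <tag>, <value>", where the value is
   a string for tags 4, 5, 32 and odd tags above 32, and an integer
   otherwise, e.g. (illustrative):

       .eabi_attribute 10, 2
       .eabi_attribute 5, "cortex-a8"  */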
3816
3817 static void
3818 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3819 {
3820 expressionS exp;
3821 bfd_boolean is_string;
3822 int tag;
3823 unsigned int i = 0;
3824 char *s = NULL;
3825 char saved_char;
3826
3827 expression (& exp);
3828 if (exp.X_op != O_constant)
3829 goto bad;
3830
3831 tag = exp.X_add_number;
3832 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3833 is_string = 1;
3834 else
3835 is_string = 0;
3836
3837 if (skip_past_comma (&input_line_pointer) == FAIL)
3838 goto bad;
3839 if (tag == 32 || !is_string)
3840 {
3841 expression (& exp);
3842 if (exp.X_op != O_constant)
3843 {
3844 as_bad (_("expected numeric constant"));
3845 ignore_rest_of_line ();
3846 return;
3847 }
3848 i = exp.X_add_number;
3849 }
3850 if (tag == Tag_compatibility
3851 && skip_past_comma (&input_line_pointer) == FAIL)
3852 {
3853 as_bad (_("expected comma"));
3854 ignore_rest_of_line ();
3855 return;
3856 }
3857 if (is_string)
3858 {
3859       skip_whitespace (input_line_pointer);
3860 if (*input_line_pointer != '"')
3861 goto bad_string;
3862 input_line_pointer++;
3863 s = input_line_pointer;
3864 while (*input_line_pointer && *input_line_pointer != '"')
3865 input_line_pointer++;
3866 if (*input_line_pointer != '"')
3867 goto bad_string;
3868 saved_char = *input_line_pointer;
3869 *input_line_pointer = 0;
3870 }
3871 else
3872 {
3873 s = NULL;
3874 saved_char = 0;
3875 }
3876
3877 if (tag == Tag_compatibility)
3878 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3879 else if (is_string)
3880 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3881 else
3882 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3883
3884 if (s)
3885 {
3886 *input_line_pointer = saved_char;
3887 input_line_pointer++;
3888 }
3889 demand_empty_rest_of_line ();
3890 return;
3891 bad_string:
3892 as_bad (_("bad string constant"));
3893 ignore_rest_of_line ();
3894 return;
3895 bad:
3896 as_bad (_("expected <tag> , <value>"));
3897 ignore_rest_of_line ();
3898 }
3899 #endif /* OBJ_ELF */
3900
3901 static void s_arm_arch (int);
3902 static void s_arm_object_arch (int);
3903 static void s_arm_cpu (int);
3904 static void s_arm_fpu (int);
3905
3906 #ifdef TE_PE
3907
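/* Implement the .secrel32 directive (PE targets): each comma-separated
   symbolic expression is emitted as a 32-bit section-relative value,
   e.g. ".secrel32 sym" (illustrative).  */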
3908 static void
3909 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3910 {
3911 expressionS exp;
3912
3913 do
3914 {
3915 expression (&exp);
3916 if (exp.X_op == O_symbol)
3917 exp.X_op = O_secrel;
3918
3919 emit_expr (&exp, 4);
3920 }
3921 while (*input_line_pointer++ == ',');
3922
3923 input_line_pointer--;
3924 demand_empty_rest_of_line ();
3925 }
3926 #endif /* TE_PE */
3927
3928 /* This table describes all the machine specific pseudo-ops the assembler
3929 has to support. The fields are:
3930 pseudo-op name without dot
3931 function to call to execute this pseudo-op
3932 Integer arg to pass to the function. */
3933
3934 const pseudo_typeS md_pseudo_table[] =
3935 {
3936 /* Never called because '.req' does not start a line. */
3937 { "req", s_req, 0 },
3938 /* Following two are likewise never called. */
3939 { "dn", s_dn, 0 },
3940 { "qn", s_qn, 0 },
3941 { "unreq", s_unreq, 0 },
3942 { "bss", s_bss, 0 },
3943 { "align", s_align, 0 },
3944 { "arm", s_arm, 0 },
3945 { "thumb", s_thumb, 0 },
3946 { "code", s_code, 0 },
3947 { "force_thumb", s_force_thumb, 0 },
3948 { "thumb_func", s_thumb_func, 0 },
3949 { "thumb_set", s_thumb_set, 0 },
3950 { "even", s_even, 0 },
3951 { "ltorg", s_ltorg, 0 },
3952 { "pool", s_ltorg, 0 },
3953 { "syntax", s_syntax, 0 },
3954 { "cpu", s_arm_cpu, 0 },
3955 { "arch", s_arm_arch, 0 },
3956 { "object_arch", s_arm_object_arch, 0 },
3957 { "fpu", s_arm_fpu, 0 },
3958 #ifdef OBJ_ELF
3959 { "word", s_arm_elf_cons, 4 },
3960 { "long", s_arm_elf_cons, 4 },
3961 { "rel31", s_arm_rel31, 0 },
3962 { "fnstart", s_arm_unwind_fnstart, 0 },
3963 { "fnend", s_arm_unwind_fnend, 0 },
3964 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3965 { "personality", s_arm_unwind_personality, 0 },
3966 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3967 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3968 { "save", s_arm_unwind_save, 0 },
3969 { "vsave", s_arm_unwind_save, 1 },
3970 { "movsp", s_arm_unwind_movsp, 0 },
3971 { "pad", s_arm_unwind_pad, 0 },
3972 { "setfp", s_arm_unwind_setfp, 0 },
3973 { "unwind_raw", s_arm_unwind_raw, 0 },
3974 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3975 #else
3976 { "word", cons, 4},
3977
3978 /* These are used for dwarf. */
3979 {"2byte", cons, 2},
3980 {"4byte", cons, 4},
3981 {"8byte", cons, 8},
3982 /* These are used for dwarf2. */
3983 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3984 { "loc", dwarf2_directive_loc, 0 },
3985 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3986 #endif
3987 { "extend", float_cons, 'x' },
3988 { "ldouble", float_cons, 'x' },
3989 { "packed", float_cons, 'p' },
3990 #ifdef TE_PE
3991 {"secrel32", pe_directive_secrel, 0},
3992 #endif
3993 { 0, 0, 0 }
3994 };
3995 \f
3996 /* Parser functions used exclusively in instruction operands. */
3997
3998 /* Generic immediate-value read function for use in insn parsing.
3999 STR points to the beginning of the immediate (the leading #);
4000 VAL receives the value; if the value is outside [MIN, MAX]
4001 issue an error. PREFIX_OPT is true if the immediate prefix is
4002 optional. */
4003
4004 static int
4005 parse_immediate (char **str, int *val, int min, int max,
4006 bfd_boolean prefix_opt)
4007 {
4008 expressionS exp;
4009 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4010 if (exp.X_op != O_constant)
4011 {
4012 inst.error = _("constant expression required");
4013 return FAIL;
4014 }
4015
4016 if (exp.X_add_number < min || exp.X_add_number > max)
4017 {
4018 inst.error = _("immediate value out of range");
4019 return FAIL;
4020 }
4021
4022 *val = exp.X_add_number;
4023 return SUCCESS;
4024 }
4025
4026 /* Less-generic immediate-value read function with the possibility of loading a
4027 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4028 instructions. Puts the result directly in inst.operands[i]. */
4029
4030 static int
4031 parse_big_immediate (char **str, int i)
4032 {
4033 expressionS exp;
4034 char *ptr = *str;
4035
4036 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4037
4038 if (exp.X_op == O_constant)
4039 {
4040 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4041 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4042 O_constant. We have to be careful not to break compilation for
4043 32-bit X_add_number, though. */
4044 if ((exp.X_add_number & ~0xffffffffl) != 0)
4045 {
4046 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4047 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4048 inst.operands[i].regisimm = 1;
4049 }
4050 }
4051 else if (exp.X_op == O_big
4052 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4053 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4054 {
4055 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4056 /* Bignums have their least significant bits in
4057 generic_bignum[0]. Make sure we put 32 bits in imm and
4058 32 bits in reg, in a (hopefully) portable way. */
4059 assert (parts != 0);
4060 inst.operands[i].imm = 0;
4061 for (j = 0; j < parts; j++, idx++)
4062 inst.operands[i].imm |= generic_bignum[idx]
4063 << (LITTLENUM_NUMBER_OF_BITS * j);
4064 inst.operands[i].reg = 0;
4065 for (j = 0; j < parts; j++, idx++)
4066 inst.operands[i].reg |= generic_bignum[idx]
4067 << (LITTLENUM_NUMBER_OF_BITS * j);
4068 inst.operands[i].regisimm = 1;
4069 }
4070 else
4071 return FAIL;
4072
4073 *str = ptr;
4074
4075 return SUCCESS;
4076 }
4077
4078 /* Returns the pseudo-register number of an FPA immediate constant,
4079 or FAIL if there isn't a valid constant here. */
4080
4081 static int
4082 parse_fpa_immediate (char ** str)
4083 {
4084 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4085 char * save_in;
4086 expressionS exp;
4087 int i;
4088 int j;
4089
4090   /* First try and match exact strings; this guarantees
4091 that some formats will work even for cross assembly. */
4092
4093 for (i = 0; fp_const[i]; i++)
4094 {
4095 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4096 {
4097 char *start = *str;
4098
4099 *str += strlen (fp_const[i]);
4100 if (is_end_of_line[(unsigned char) **str])
4101 return i + 8;
4102 *str = start;
4103 }
4104 }
4105
4106 /* Just because we didn't get a match doesn't mean that the constant
4107 isn't valid, just that it is in a format that we don't
4108 automatically recognize. Try parsing it with the standard
4109 expression routines. */
4110
4111 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4112
4113 /* Look for a raw floating point number. */
4114 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4115 && is_end_of_line[(unsigned char) *save_in])
4116 {
4117 for (i = 0; i < NUM_FLOAT_VALS; i++)
4118 {
4119 for (j = 0; j < MAX_LITTLENUMS; j++)
4120 {
4121 if (words[j] != fp_values[i][j])
4122 break;
4123 }
4124
4125 if (j == MAX_LITTLENUMS)
4126 {
4127 *str = save_in;
4128 return i + 8;
4129 }
4130 }
4131 }
4132
4133   /* Try to parse a more complex expression; this will probably fail
4134      unless the code uses a floating point prefix (e.g. "0f").  */
4135 save_in = input_line_pointer;
4136 input_line_pointer = *str;
4137 if (expression (&exp) == absolute_section
4138 && exp.X_op == O_big
4139 && exp.X_add_number < 0)
4140 {
4141 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4142 Ditto for 15. */
4143 if (gen_to_words (words, 5, (long) 15) == 0)
4144 {
4145 for (i = 0; i < NUM_FLOAT_VALS; i++)
4146 {
4147 for (j = 0; j < MAX_LITTLENUMS; j++)
4148 {
4149 if (words[j] != fp_values[i][j])
4150 break;
4151 }
4152
4153 if (j == MAX_LITTLENUMS)
4154 {
4155 *str = input_line_pointer;
4156 input_line_pointer = save_in;
4157 return i + 8;
4158 }
4159 }
4160 }
4161 }
4162
4163 *str = input_line_pointer;
4164 input_line_pointer = save_in;
4165 inst.error = _("invalid FPA immediate expression");
4166 return FAIL;
4167 }
4168
4169 /* Returns 1 if a number has "quarter-precision" float format
4170 0baBbbbbbc defgh000 00000000 00000000. */
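/* For example, 1.0f (0x3f800000) and -2.0f (0xc0000000) match this
   pattern, while 0.1f (0x3dcccccd) does not.  */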
4171
4172 static int
4173 is_quarter_float (unsigned imm)
4174 {
4175 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4176 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4177 }
4178
4179 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4180 0baBbbbbbc defgh000 00000000 00000000.
4181 The zero and minus-zero cases need special handling, since they can't be
4182 encoded in the "quarter-precision" float format, but can nonetheless be
4183 loaded as integer constants. */
4184
4185 static unsigned
4186 parse_qfloat_immediate (char **ccp, int *immed)
4187 {
4188 char *str = *ccp;
4189 char *fpnum;
4190 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4191 int found_fpchar = 0;
4192
4193 skip_past_char (&str, '#');
4194
4195 /* We must not accidentally parse an integer as a floating-point number. Make
4196 sure that the value we parse is not an integer by checking for special
4197 characters '.' or 'e'.
4198 FIXME: This is a horrible hack, but doing better is tricky because type
4199 information isn't in a very usable state at parse time. */
4200 fpnum = str;
4201 skip_whitespace (fpnum);
4202
4203 if (strncmp (fpnum, "0x", 2) == 0)
4204 return FAIL;
4205 else
4206 {
4207 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4208 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4209 {
4210 found_fpchar = 1;
4211 break;
4212 }
4213
4214 if (!found_fpchar)
4215 return FAIL;
4216 }
4217
4218 if ((str = atof_ieee (str, 's', words)) != NULL)
4219 {
4220 unsigned fpword = 0;
4221 int i;
4222
4223 /* Our FP word must be 32 bits (single-precision FP). */
4224 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4225 {
4226 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4227 fpword |= words[i];
4228 }
4229
4230 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4231 *immed = fpword;
4232 else
4233 return FAIL;
4234
4235 *ccp = str;
4236
4237 return SUCCESS;
4238 }
4239
4240 return FAIL;
4241 }
4242
4243 /* Shift operands. */
4244 enum shift_kind
4245 {
4246 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4247 };
4248
4249 struct asm_shift_name
4250 {
4251 const char *name;
4252 enum shift_kind kind;
4253 };
4254
4255 /* Third argument to parse_shift. */
4256 enum parse_shift_mode
4257 {
4258 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4259 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4260 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4261 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4262 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4263 };
4264
4265 /* Parse a <shift> specifier on an ARM data processing instruction.
4266 This has three forms:
4267
4268 (LSL|LSR|ASL|ASR|ROR) Rs
4269 (LSL|LSR|ASL|ASR|ROR) #imm
4270 RRX
4271
4272 Note that ASL is assimilated to LSL in the instruction encoding, and
4273 RRX to ROR #0 (which cannot be written as such). */
4274
4275 static int
4276 parse_shift (char **str, int i, enum parse_shift_mode mode)
4277 {
4278 const struct asm_shift_name *shift_name;
4279 enum shift_kind shift;
4280 char *s = *str;
4281 char *p = s;
4282 int reg;
4283
4284 for (p = *str; ISALPHA (*p); p++)
4285 ;
4286
4287 if (p == *str)
4288 {
4289 inst.error = _("shift expression expected");
4290 return FAIL;
4291 }
4292
4293 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4294
4295 if (shift_name == NULL)
4296 {
4297 inst.error = _("shift expression expected");
4298 return FAIL;
4299 }
4300
4301 shift = shift_name->kind;
4302
4303 switch (mode)
4304 {
4305 case NO_SHIFT_RESTRICT:
4306 case SHIFT_IMMEDIATE: break;
4307
4308 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4309 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4310 {
4311 inst.error = _("'LSL' or 'ASR' required");
4312 return FAIL;
4313 }
4314 break;
4315
4316 case SHIFT_LSL_IMMEDIATE:
4317 if (shift != SHIFT_LSL)
4318 {
4319 inst.error = _("'LSL' required");
4320 return FAIL;
4321 }
4322 break;
4323
4324 case SHIFT_ASR_IMMEDIATE:
4325 if (shift != SHIFT_ASR)
4326 {
4327 inst.error = _("'ASR' required");
4328 return FAIL;
4329 }
4330 break;
4331
4332 default: abort ();
4333 }
4334
4335 if (shift != SHIFT_RRX)
4336 {
4337 /* Whitespace can appear here if the next thing is a bare digit. */
4338 skip_whitespace (p);
4339
4340 if (mode == NO_SHIFT_RESTRICT
4341 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4342 {
4343 inst.operands[i].imm = reg;
4344 inst.operands[i].immisreg = 1;
4345 }
4346 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4347 return FAIL;
4348 }
4349 inst.operands[i].shift_kind = shift;
4350 inst.operands[i].shifted = 1;
4351 *str = p;
4352 return SUCCESS;
4353 }
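
/* Some shift operands accepted here (illustrative only):
   "lsl r3"   register-specified shift, NO_SHIFT_RESTRICT mode only;
   "lsr #2"   immediate shift, the count ends up in inst.reloc.exp;
   "asl #4"   assimilated to LSL #4 via the shift-name table;
   "rrx"      no count is parsed at all.
   With SHIFT_LSL_IMMEDIATE only the LSL immediate form is allowed.  */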
4354
4355 /* Parse a <shifter_operand> for an ARM data processing instruction:
4356
4357 #<immediate>
4358 #<immediate>, <rotate>
4359 <Rm>
4360 <Rm>, <shift>
4361
4362 where <shift> is defined by parse_shift above, and <rotate> is a
4363 multiple of 2 between 0 and 30. Validation of immediate operands
4364 is deferred to md_apply_fix. */
4365
4366 static int
4367 parse_shifter_operand (char **str, int i)
4368 {
4369 int value;
4370 expressionS expr;
4371
4372 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4373 {
4374 inst.operands[i].reg = value;
4375 inst.operands[i].isreg = 1;
4376
4377 /* parse_shift will override this if appropriate */
4378 inst.reloc.exp.X_op = O_constant;
4379 inst.reloc.exp.X_add_number = 0;
4380
4381 if (skip_past_comma (str) == FAIL)
4382 return SUCCESS;
4383
4384 /* Shift operation on register. */
4385 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4386 }
4387
4388 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4389 return FAIL;
4390
4391 if (skip_past_comma (str) == SUCCESS)
4392 {
4393 /* #x, y -- ie explicit rotation by Y. */
4394 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4395 return FAIL;
4396
4397 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4398 {
4399 inst.error = _("constant expression expected");
4400 return FAIL;
4401 }
4402
4403 value = expr.X_add_number;
4404 if (value < 0 || value > 30 || value % 2 != 0)
4405 {
4406 inst.error = _("invalid rotation");
4407 return FAIL;
4408 }
4409 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4410 {
4411 inst.error = _("invalid constant");
4412 return FAIL;
4413 }
4414
4415 /* Convert to decoded value. md_apply_fix will put it back. */
4416 inst.reloc.exp.X_add_number
4417 = (((inst.reloc.exp.X_add_number << (32 - value))
4418 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4419 }
4420
4421 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4422 inst.reloc.pc_rel = 0;
4423 return SUCCESS;
4424 }
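
/* Worked example of the rotation case above (illustrative): for the operand
   "#255, 8" both range checks pass and the stored value becomes
   ror (255, 8) = 0xff000000; md_apply_fix later re-encodes this as the
   8-bit constant 255 with a rotate-right count of 8.  */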
4425
4426 /* Group relocation information. Each entry in the table contains the
4427 textual name of the relocation as it may appear in assembler source,
4428 where it must be followed by a colon.
4429 Along with this textual name are the relocation codes to be used if
4430 the corresponding instruction is an ALU instruction (ADD or SUB only),
4431 an LDR, an LDRS, or an LDC. */
4432
4433 struct group_reloc_table_entry
4434 {
4435 const char *name;
4436 int alu_code;
4437 int ldr_code;
4438 int ldrs_code;
4439 int ldc_code;
4440 };
4441
4442 typedef enum
4443 {
4444 /* Varieties of non-ALU group relocation. */
4445
4446 GROUP_LDR,
4447 GROUP_LDRS,
4448 GROUP_LDC
4449 } group_reloc_type;
4450
4451 static struct group_reloc_table_entry group_reloc_table[] =
4452 { /* Program counter relative: */
4453 { "pc_g0_nc",
4454 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4455 0, /* LDR */
4456 0, /* LDRS */
4457 0 }, /* LDC */
4458 { "pc_g0",
4459 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4460 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4461 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4462 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4463 { "pc_g1_nc",
4464 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4465 0, /* LDR */
4466 0, /* LDRS */
4467 0 }, /* LDC */
4468 { "pc_g1",
4469 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4470 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4471 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4472 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4473 { "pc_g2",
4474 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4475 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4476 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4477 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4478 /* Section base relative */
4479 { "sb_g0_nc",
4480 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4481 0, /* LDR */
4482 0, /* LDRS */
4483 0 }, /* LDC */
4484 { "sb_g0",
4485 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4486 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4487 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4488 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4489 { "sb_g1_nc",
4490 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4491 0, /* LDR */
4492 0, /* LDRS */
4493 0 }, /* LDC */
4494 { "sb_g1",
4495 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4496 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4497 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4498 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4499 { "sb_g2",
4500 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4501 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4502 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4503 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4504
4505 /* Given the address of a pointer pointing to the textual name of a group
4506 relocation as may appear in assembler source, attempt to find its details
4507 in group_reloc_table. The pointer will be updated to the character after
4508 the trailing colon. On failure, FAIL will be returned; SUCCESS
4509 otherwise. On success, *entry will be updated to point at the relevant
4510 group_reloc_table entry. */
4511
4512 static int
4513 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4514 {
4515 unsigned int i;
4516 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4517 {
4518 int length = strlen (group_reloc_table[i].name);
4519
4520 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4521 && (*str)[length] == ':')
4522 {
4523 *out = &group_reloc_table[i];
4524 *str += (length + 1);
4525 return SUCCESS;
4526 }
4527 }
4528
4529 return FAIL;
4530 }
4531
4532 /* Parse a <shifter_operand> for an ARM data processing instruction
4533 (as for parse_shifter_operand) where group relocations are allowed:
4534
4535 #<immediate>
4536 #<immediate>, <rotate>
4537 #:<group_reloc>:<expression>
4538 <Rm>
4539 <Rm>, <shift>
4540
4541 where <group_reloc> is one of the strings defined in group_reloc_table.
4542 The hashes are optional.
4543
4544 Everything else is as for parse_shifter_operand. */
4545
4546 static parse_operand_result
4547 parse_shifter_operand_group_reloc (char **str, int i)
4548 {
4549 /* Determine if we have the sequence of characters #: or just :
4550 coming next. If we do, then we check for a group relocation.
4551 If we don't, punt the whole lot to parse_shifter_operand. */
4552
4553 if (((*str)[0] == '#' && (*str)[1] == ':')
4554 || (*str)[0] == ':')
4555 {
4556 struct group_reloc_table_entry *entry;
4557
4558 if ((*str)[0] == '#')
4559 (*str) += 2;
4560 else
4561 (*str)++;
4562
4563 /* Try to parse a group relocation. Anything else is an error. */
4564 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4565 {
4566 inst.error = _("unknown group relocation");
4567 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4568 }
4569
4570 /* We now have the group relocation table entry corresponding to
4571 the name in the assembler source. Next, we parse the expression. */
4572 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4573 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4574
4575 /* Record the relocation type (always the ALU variant here). */
4576 inst.reloc.type = entry->alu_code;
4577 assert (inst.reloc.type != 0);
4578
4579 return PARSE_OPERAND_SUCCESS;
4580 }
4581 else
4582 return parse_shifter_operand (str, i) == SUCCESS
4583 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4584
4585 /* Never reached. */
4586 }
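
/* For instance (a sketch of the source syntax): an operand written as
   "#:sb_g0:(sym)" or ":sb_g0:(sym)" selects the "sb_g0" table entry, records
   its ALU relocation BFD_RELOC_ARM_ALU_SB_G0 against the expression "(sym)",
   and returns PARSE_OPERAND_SUCCESS; anything not starting with "#:" or ":"
   falls through to parse_shifter_operand.  */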
4587
4588 /* Parse all forms of an ARM address expression. Information is written
4589 to inst.operands[i] and/or inst.reloc.
4590
4591 Preindexed addressing (.preind=1):
4592
4593 [Rn, #offset] .reg=Rn .reloc.exp=offset
4594 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4595 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4596 .shift_kind=shift .reloc.exp=shift_imm
4597
4598 These three may have a trailing ! which causes .writeback to be set also.
4599
4600 Postindexed addressing (.postind=1, .writeback=1):
4601
4602 [Rn], #offset .reg=Rn .reloc.exp=offset
4603 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4604 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4605 .shift_kind=shift .reloc.exp=shift_imm
4606
4607 Unindexed addressing (.preind=0, .postind=0):
4608
4609 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4610
4611 Other:
4612
4613 [Rn]{!} shorthand for [Rn,#0]{!}
4614 =immediate .isreg=0 .reloc.exp=immediate
4615 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4616
4617 It is the caller's responsibility to check for addressing modes not
4618 supported by the instruction, and to set inst.reloc.type. */
4619
4620 static parse_operand_result
4621 parse_address_main (char **str, int i, int group_relocations,
4622 group_reloc_type group_type)
4623 {
4624 char *p = *str;
4625 int reg;
4626
4627 if (skip_past_char (&p, '[') == FAIL)
4628 {
4629 if (skip_past_char (&p, '=') == FAIL)
4630 {
4631 /* bare address - translate to PC-relative offset */
4632 inst.reloc.pc_rel = 1;
4633 inst.operands[i].reg = REG_PC;
4634 inst.operands[i].isreg = 1;
4635 inst.operands[i].preind = 1;
4636 }
4637 /* else a load-constant pseudo op, no special treatment needed here */
4638
4639 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4640 return PARSE_OPERAND_FAIL;
4641
4642 *str = p;
4643 return PARSE_OPERAND_SUCCESS;
4644 }
4645
4646 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4647 {
4648 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4649 return PARSE_OPERAND_FAIL;
4650 }
4651 inst.operands[i].reg = reg;
4652 inst.operands[i].isreg = 1;
4653
4654 if (skip_past_comma (&p) == SUCCESS)
4655 {
4656 inst.operands[i].preind = 1;
4657
4658 if (*p == '+') p++;
4659 else if (*p == '-') p++, inst.operands[i].negative = 1;
4660
4661 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4662 {
4663 inst.operands[i].imm = reg;
4664 inst.operands[i].immisreg = 1;
4665
4666 if (skip_past_comma (&p) == SUCCESS)
4667 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4668 return PARSE_OPERAND_FAIL;
4669 }
4670 else if (skip_past_char (&p, ':') == SUCCESS)
4671 {
4672 /* FIXME: '@' should be used here, but it's filtered out by generic
4673 code before we get to see it here. This may be subject to
4674 change. */
4675 expressionS exp;
4676 my_get_expression (&exp, &p, GE_NO_PREFIX);
4677 if (exp.X_op != O_constant)
4678 {
4679 inst.error = _("alignment must be constant");
4680 return PARSE_OPERAND_FAIL;
4681 }
4682 inst.operands[i].imm = exp.X_add_number << 8;
4683 inst.operands[i].immisalign = 1;
4684 /* Alignments are not pre-indexes. */
4685 inst.operands[i].preind = 0;
4686 }
4687 else
4688 {
4689 if (inst.operands[i].negative)
4690 {
4691 inst.operands[i].negative = 0;
4692 p--;
4693 }
4694
4695 if (group_relocations
4696 && ((*p == '#' && *(p + 1) == ':')
4697 || *p == ':'))
4698 {
4699 struct group_reloc_table_entry *entry;
4700
4701 /* Skip over the #: or : sequence. */
4702 if (*p == '#')
4703 p += 2;
4704 else
4705 p++;
4706
4707 /* Try to parse a group relocation. Anything else is an
4708 error. */
4709 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4710 {
4711 inst.error = _("unknown group relocation");
4712 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4713 }
4714
4715 /* We now have the group relocation table entry corresponding to
4716 the name in the assembler source. Next, we parse the
4717 expression. */
4718 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4719 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4720
4721 /* Record the relocation type. */
4722 switch (group_type)
4723 {
4724 case GROUP_LDR:
4725 inst.reloc.type = entry->ldr_code;
4726 break;
4727
4728 case GROUP_LDRS:
4729 inst.reloc.type = entry->ldrs_code;
4730 break;
4731
4732 case GROUP_LDC:
4733 inst.reloc.type = entry->ldc_code;
4734 break;
4735
4736 default:
4737 assert (0);
4738 }
4739
4740 if (inst.reloc.type == 0)
4741 {
4742 inst.error = _("this group relocation is not allowed on this instruction");
4743 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4744 }
4745 }
4746 else
4747 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4748 return PARSE_OPERAND_FAIL;
4749 }
4750 }
4751
4752 if (skip_past_char (&p, ']') == FAIL)
4753 {
4754 inst.error = _("']' expected");
4755 return PARSE_OPERAND_FAIL;
4756 }
4757
4758 if (skip_past_char (&p, '!') == SUCCESS)
4759 inst.operands[i].writeback = 1;
4760
4761 else if (skip_past_comma (&p) == SUCCESS)
4762 {
4763 if (skip_past_char (&p, '{') == SUCCESS)
4764 {
4765 /* [Rn], {expr} - unindexed, with option */
4766 if (parse_immediate (&p, &inst.operands[i].imm,
4767 0, 255, TRUE) == FAIL)
4768 return PARSE_OPERAND_FAIL;
4769
4770 if (skip_past_char (&p, '}') == FAIL)
4771 {
4772 inst.error = _("'}' expected at end of 'option' field");
4773 return PARSE_OPERAND_FAIL;
4774 }
4775 if (inst.operands[i].preind)
4776 {
4777 inst.error = _("cannot combine index with option");
4778 return PARSE_OPERAND_FAIL;
4779 }
4780 *str = p;
4781 return PARSE_OPERAND_SUCCESS;
4782 }
4783 else
4784 {
4785 inst.operands[i].postind = 1;
4786 inst.operands[i].writeback = 1;
4787
4788 if (inst.operands[i].preind)
4789 {
4790 inst.error = _("cannot combine pre- and post-indexing");
4791 return PARSE_OPERAND_FAIL;
4792 }
4793
4794 if (*p == '+') p++;
4795 else if (*p == '-') p++, inst.operands[i].negative = 1;
4796
4797 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4798 {
4799 /* We might be using the immediate for alignment already. If we
4800 are, OR the register number into the low-order bits. */
4801 if (inst.operands[i].immisalign)
4802 inst.operands[i].imm |= reg;
4803 else
4804 inst.operands[i].imm = reg;
4805 inst.operands[i].immisreg = 1;
4806
4807 if (skip_past_comma (&p) == SUCCESS)
4808 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4809 return PARSE_OPERAND_FAIL;
4810 }
4811 else
4812 {
4813 if (inst.operands[i].negative)
4814 {
4815 inst.operands[i].negative = 0;
4816 p--;
4817 }
4818 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4819 return PARSE_OPERAND_FAIL;
4820 }
4821 }
4822 }
4823
4824 /* If at this point neither .preind nor .postind is set, we have a
4825 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4826 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4827 {
4828 inst.operands[i].preind = 1;
4829 inst.reloc.exp.X_op = O_constant;
4830 inst.reloc.exp.X_add_number = 0;
4831 }
4832 *str = p;
4833 return PARSE_OPERAND_SUCCESS;
4834 }
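
/* A few concrete operands and the fields they set (a sketch, keyed to the
   summary comment above):
   "[r0, #4]"           .reg=r0 .preind=1, offset 4 in inst.reloc.exp;
   "[r1, r2, lsl #2]!"  .reg=r1 .imm=r2 .immisreg=1 .shifted=1 .writeback=1;
   "[r3], #-8"          .reg=r3 .postind=1 .writeback=1;
   "[r4], {16}"         .reg=r4 .imm=16, unindexed (coprocessor option);
   "[r5]"               shorthand for "[r5, #0]".  */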
4835
4836 static int
4837 parse_address (char **str, int i)
4838 {
4839 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4840 ? SUCCESS : FAIL;
4841 }
4842
4843 static parse_operand_result
4844 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4845 {
4846 return parse_address_main (str, i, 1, type);
4847 }
4848
4849 /* Parse an operand for a MOVW or MOVT instruction. */
4850 static int
4851 parse_half (char **str)
4852 {
4853 char * p;
4854
4855 p = *str;
4856 skip_past_char (&p, '#');
4857 if (strncasecmp (p, ":lower16:", 9) == 0)
4858 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4859 else if (strncasecmp (p, ":upper16:", 9) == 0)
4860 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4861
4862 if (inst.reloc.type != BFD_RELOC_UNUSED)
4863 {
4864 p += 9;
4865 skip_whitespace (p);
4866 }
4867
4868 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4869 return FAIL;
4870
4871 if (inst.reloc.type == BFD_RELOC_UNUSED)
4872 {
4873 if (inst.reloc.exp.X_op != O_constant)
4874 {
4875 inst.error = _("constant expression expected");
4876 return FAIL;
4877 }
4878 if (inst.reloc.exp.X_add_number < 0
4879 || inst.reloc.exp.X_add_number > 0xffff)
4880 {
4881 inst.error = _("immediate value out of range");
4882 return FAIL;
4883 }
4884 }
4885 *str = p;
4886 return SUCCESS;
4887 }
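
/* Examples of the three forms handled above (illustrative):
   "#:lower16:sym"  selects BFD_RELOC_ARM_MOVW and defers range checking
                    to the fixup machinery;
   "#:upper16:sym"  likewise, with BFD_RELOC_ARM_MOVT;
   "#0x1234"        stays BFD_RELOC_UNUSED and must be a constant in the
                    range 0..0xffff.  */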
4888
4889 /* Miscellaneous. */
4890
4891 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4892 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4893 static int
4894 parse_psr (char **str)
4895 {
4896 char *p;
4897 unsigned long psr_field;
4898 const struct asm_psr *psr;
4899 char *start;
4900
4901 /* CPSR and SPSR names can now be lowercase. This is just a convenience
4902 feature for ease of use and backwards compatibility. */
4903 p = *str;
4904 if (strncasecmp (p, "SPSR", 4) == 0)
4905 psr_field = SPSR_BIT;
4906 else if (strncasecmp (p, "CPSR", 4) == 0)
4907 psr_field = 0;
4908 else
4909 {
4910 start = p;
4911 do
4912 p++;
4913 while (ISALNUM (*p) || *p == '_');
4914
4915 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4916 if (!psr)
4917 return FAIL;
4918
4919 *str = p;
4920 return psr->field;
4921 }
4922
4923 p += 4;
4924 if (*p == '_')
4925 {
4926 /* A suffix follows. */
4927 p++;
4928 start = p;
4929
4930 do
4931 p++;
4932 while (ISALNUM (*p) || *p == '_');
4933
4934 psr = hash_find_n (arm_psr_hsh, start, p - start);
4935 if (!psr)
4936 goto error;
4937
4938 psr_field |= psr->field;
4939 }
4940 else
4941 {
4942 if (ISALNUM (*p))
4943 goto error; /* Garbage after "[CS]PSR". */
4944
4945 psr_field |= (PSR_c | PSR_f);
4946 }
4947 *str = p;
4948 return psr_field;
4949
4950 error:
4951 inst.error = _("flag for {c}psr instruction expected");
4952 return FAIL;
4953 }
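
/* Illustrative inputs (a sketch; the exact suffix set lives in the table
   behind arm_psr_hsh):
   "CPSR"     -> PSR_c | PSR_f, the historical default when no suffix is
                 given;
   "SPSR_f"   -> SPSR_BIT | PSR_f;
   "cpsr_c"   -> PSR_c, lowercase being accepted as noted above.
   Names that start with neither CPSR nor SPSR are looked up in
   arm_v7m_psr_hsh instead.  */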
4954
4955 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4956 value suitable for splatting into the AIF field of the instruction. */
4957
4958 static int
4959 parse_cps_flags (char **str)
4960 {
4961 int val = 0;
4962 int saw_a_flag = 0;
4963 char *s = *str;
4964
4965 for (;;)
4966 switch (*s++)
4967 {
4968 case '\0': case ',':
4969 goto done;
4970
4971 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4972 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4973 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4974
4975 default:
4976 inst.error = _("unrecognized CPS flag");
4977 return FAIL;
4978 }
4979
4980 done:
4981 if (saw_a_flag == 0)
4982 {
4983 inst.error = _("missing CPS flags");
4984 return FAIL;
4985 }
4986
4987 *str = s - 1;
4988 return val;
4989 }
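
/* For example (illustrative): the flag string in "cpsie if" parses to
   0x2 | 0x1 = 3 and the one in "cpsid aif" to 0x4 | 0x2 | 0x1 = 7, while an
   empty flag string is rejected by the saw_a_flag check above.  */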
4990
4991 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4992 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4993
4994 static int
4995 parse_endian_specifier (char **str)
4996 {
4997 int little_endian;
4998 char *s = *str;
4999
5000 if (strncasecmp (s, "BE", 2))
5001 little_endian = 0;
5002 else if (strncasecmp (s, "LE", 2))
5003 little_endian = 1;
5004 else
5005 {
5006 inst.error = _("valid endian specifiers are be or le");
5007 return FAIL;
5008 }
5009
5010 if (ISALNUM (s[2]) || s[2] == '_')
5011 {
5012 inst.error = _("valid endian specifiers are be or le");
5013 return FAIL;
5014 }
5015
5016 *str = s + 2;
5017 return little_endian;
5018 }
5019
5020 /* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a
5021 value suitable for poking into the rotate field of an sxt or sxta
5022 instruction, or FAIL on error. */
5023
5024 static int
5025 parse_ror (char **str)
5026 {
5027 int rot;
5028 char *s = *str;
5029
5030 if (strncasecmp (s, "ROR", 3) == 0)
5031 s += 3;
5032 else
5033 {
5034 inst.error = _("missing rotation field after comma");
5035 return FAIL;
5036 }
5037
5038 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5039 return FAIL;
5040
5041 switch (rot)
5042 {
5043 case 0: *str = s; return 0x0;
5044 case 8: *str = s; return 0x1;
5045 case 16: *str = s; return 0x2;
5046 case 24: *str = s; return 0x3;
5047
5048 default:
5049 inst.error = _("rotation can only be 0, 8, 16, or 24");
5050 return FAIL;
5051 }
5052 }
5053
5054 /* Parse a conditional code (from conds[] below). The value returned is in the
5055 range 0 .. 14, or FAIL. */
5056 static int
5057 parse_cond (char **str)
5058 {
5059 char *p, *q;
5060 const struct asm_cond *c;
5061
5062 p = q = *str;
5063 while (ISALPHA (*q))
5064 q++;
5065
5066 c = hash_find_n (arm_cond_hsh, p, q - p);
5067 if (!c)
5068 {
5069 inst.error = _("condition required");
5070 return FAIL;
5071 }
5072
5073 *str = q;
5074 return c->value;
5075 }
5076
5077 /* Parse an option for a barrier instruction. Returns the encoding for the
5078 option, or FAIL. */
5079 static int
5080 parse_barrier (char **str)
5081 {
5082 char *p, *q;
5083 const struct asm_barrier_opt *o;
5084
5085 p = q = *str;
5086 while (ISALPHA (*q))
5087 q++;
5088
5089 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5090 if (!o)
5091 return FAIL;
5092
5093 *str = q;
5094 return o->value;
5095 }
5096
5097 /* Parse the operands of a table branch instruction. Similar to a memory
5098 operand. */
5099 static int
5100 parse_tb (char **str)
5101 {
5102 char * p = *str;
5103 int reg;
5104
5105 if (skip_past_char (&p, '[') == FAIL)
5106 {
5107 inst.error = _("'[' expected");
5108 return FAIL;
5109 }
5110
5111 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5112 {
5113 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5114 return FAIL;
5115 }
5116 inst.operands[0].reg = reg;
5117
5118 if (skip_past_comma (&p) == FAIL)
5119 {
5120 inst.error = _("',' expected");
5121 return FAIL;
5122 }
5123
5124 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5125 {
5126 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5127 return FAIL;
5128 }
5129 inst.operands[0].imm = reg;
5130
5131 if (skip_past_comma (&p) == SUCCESS)
5132 {
5133 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5134 return FAIL;
5135 if (inst.reloc.exp.X_add_number != 1)
5136 {
5137 inst.error = _("invalid shift");
5138 return FAIL;
5139 }
5140 inst.operands[0].shifted = 1;
5141 }
5142
5143 if (skip_past_char (&p, ']') == FAIL)
5144 {
5145 inst.error = _("']' expected");
5146 return FAIL;
5147 }
5148 *str = p;
5149 return SUCCESS;
5150 }
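
/* The two operand forms this expects to see (illustrative):
   "tbb [r0, r1]"          no shift;
   "tbh [r0, r1, lsl #1]"  the only shift accepted is LSL #1, enforced by
                           the X_add_number check above.  */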
5151
5152 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5153 information on the types the operands can take and how they are encoded.
5154 Up to four operands may be read; this function handles setting the
5155 ".present" field for each read operand itself.
5156 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5157 else returns FAIL. */
5158
5159 static int
5160 parse_neon_mov (char **str, int *which_operand)
5161 {
5162 int i = *which_operand, val;
5163 enum arm_reg_type rtype;
5164 char *ptr = *str;
5165 struct neon_type_el optype;
5166
5167 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5168 {
5169 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5170 inst.operands[i].reg = val;
5171 inst.operands[i].isscalar = 1;
5172 inst.operands[i].vectype = optype;
5173 inst.operands[i++].present = 1;
5174
5175 if (skip_past_comma (&ptr) == FAIL)
5176 goto wanted_comma;
5177
5178 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5179 goto wanted_arm;
5180
5181 inst.operands[i].reg = val;
5182 inst.operands[i].isreg = 1;
5183 inst.operands[i].present = 1;
5184 }
5185 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5186 != FAIL)
5187 {
5188 /* Cases 0, 1, 2, 3, 5 (D only). */
5189 if (skip_past_comma (&ptr) == FAIL)
5190 goto wanted_comma;
5191
5192 inst.operands[i].reg = val;
5193 inst.operands[i].isreg = 1;
5194 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5195 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5196 inst.operands[i].isvec = 1;
5197 inst.operands[i].vectype = optype;
5198 inst.operands[i++].present = 1;
5199
5200 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5201 {
5202 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5203 Case 13: VMOV <Sd>, <Rm> */
5204 inst.operands[i].reg = val;
5205 inst.operands[i].isreg = 1;
5206 inst.operands[i].present = 1;
5207
5208 if (rtype == REG_TYPE_NQ)
5209 {
5210 first_error (_("can't use Neon quad register here"));
5211 return FAIL;
5212 }
5213 else if (rtype != REG_TYPE_VFS)
5214 {
5215 i++;
5216 if (skip_past_comma (&ptr) == FAIL)
5217 goto wanted_comma;
5218 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5219 goto wanted_arm;
5220 inst.operands[i].reg = val;
5221 inst.operands[i].isreg = 1;
5222 inst.operands[i].present = 1;
5223 }
5224 }
5225 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5226 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5227 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5228 Case 10: VMOV.F32 <Sd>, #<imm>
5229 Case 11: VMOV.F64 <Dd>, #<imm> */
5230 inst.operands[i].immisfloat = 1;
5231 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5232 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5233 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5234 ;
5235 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5236 &optype)) != FAIL)
5237 {
5238 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5239 Case 1: VMOV<c><q> <Dd>, <Dm>
5240 Case 8: VMOV.F32 <Sd>, <Sm>
5241 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5242
5243 inst.operands[i].reg = val;
5244 inst.operands[i].isreg = 1;
5245 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5246 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5247 inst.operands[i].isvec = 1;
5248 inst.operands[i].vectype = optype;
5249 inst.operands[i].present = 1;
5250
5251 if (skip_past_comma (&ptr) == SUCCESS)
5252 {
5253 /* Case 15. */
5254 i++;
5255
5256 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5257 goto wanted_arm;
5258
5259 inst.operands[i].reg = val;
5260 inst.operands[i].isreg = 1;
5261 inst.operands[i++].present = 1;
5262
5263 if (skip_past_comma (&ptr) == FAIL)
5264 goto wanted_comma;
5265
5266 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5267 goto wanted_arm;
5268
5269 inst.operands[i].reg = val;
5270 inst.operands[i].isreg = 1;
5271 inst.operands[i++].present = 1;
5272 }
5273 }
5274 else
5275 {
5276 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5277 return FAIL;
5278 }
5279 }
5280 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5281 {
5282 /* Cases 6, 7. */
5283 inst.operands[i].reg = val;
5284 inst.operands[i].isreg = 1;
5285 inst.operands[i++].present = 1;
5286
5287 if (skip_past_comma (&ptr) == FAIL)
5288 goto wanted_comma;
5289
5290 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5291 {
5292 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5293 inst.operands[i].reg = val;
5294 inst.operands[i].isscalar = 1;
5295 inst.operands[i].present = 1;
5296 inst.operands[i].vectype = optype;
5297 }
5298 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5299 {
5300 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5301 inst.operands[i].reg = val;
5302 inst.operands[i].isreg = 1;
5303 inst.operands[i++].present = 1;
5304
5305 if (skip_past_comma (&ptr) == FAIL)
5306 goto wanted_comma;
5307
5308 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5309 == FAIL)
5310 {
5311 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5312 return FAIL;
5313 }
5314
5315 inst.operands[i].reg = val;
5316 inst.operands[i].isreg = 1;
5317 inst.operands[i].isvec = 1;
5318 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5319 inst.operands[i].vectype = optype;
5320 inst.operands[i].present = 1;
5321
5322 if (rtype == REG_TYPE_VFS)
5323 {
5324 /* Case 14. */
5325 i++;
5326 if (skip_past_comma (&ptr) == FAIL)
5327 goto wanted_comma;
5328 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5329 &optype)) == FAIL)
5330 {
5331 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5332 return FAIL;
5333 }
5334 inst.operands[i].reg = val;
5335 inst.operands[i].isreg = 1;
5336 inst.operands[i].isvec = 1;
5337 inst.operands[i].issingle = 1;
5338 inst.operands[i].vectype = optype;
5339 inst.operands[i].present = 1;
5340 }
5341 }
5342 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5343 != FAIL)
5344 {
5345 /* Case 13. */
5346 inst.operands[i].reg = val;
5347 inst.operands[i].isreg = 1;
5348 inst.operands[i].isvec = 1;
5349 inst.operands[i].issingle = 1;
5350 inst.operands[i].vectype = optype;
5351 inst.operands[i++].present = 1;
5352 }
5353 }
5354 else
5355 {
5356 first_error (_("parse error"));
5357 return FAIL;
5358 }
5359
5360 /* Successfully parsed the operands. Update args. */
5361 *which_operand = i;
5362 *str = ptr;
5363 return SUCCESS;
5364
5365 wanted_comma:
5366 first_error (_("expected comma"));
5367 return FAIL;
5368
5369 wanted_arm:
5370 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5371 return FAIL;
5372 }
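
/* A few of the surface forms accepted here, keyed to the case numbers in
   the comments above (illustrative, not exhaustive):
   "vmov d0, d1"         case 1      "vmov.i32 q0, #42"   case 2
   "vmov.32 d0[1], r2"   case 4      "vmov d0, r2, r3"    case 5
   "vmov r2, r3, d0"     case 7      "vmov.f32 s0, #0.5"  case 10  */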
5373
5374 /* Matcher codes for parse_operands. */
5375 enum operand_parse_code
5376 {
5377 OP_stop, /* end of line */
5378
5379 OP_RR, /* ARM register */
5380 OP_RRnpc, /* ARM register, not r15 */
5381 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5382 OP_RRw, /* ARM register, not r15, optional trailing ! */
5383 OP_RCP, /* Coprocessor number */
5384 OP_RCN, /* Coprocessor register */
5385 OP_RF, /* FPA register */
5386 OP_RVS, /* VFP single precision register */
5387 OP_RVD, /* VFP double precision register (0..15) */
5388 OP_RND, /* Neon double precision register (0..31) */
5389 OP_RNQ, /* Neon quad precision register */
5390 OP_RVSD, /* VFP single or double precision register */
5391 OP_RNDQ, /* Neon double or quad precision register */
5392 OP_RNSDQ, /* Neon single, double or quad precision register */
5393 OP_RNSC, /* Neon scalar D[X] */
5394 OP_RVC, /* VFP control register */
5395 OP_RMF, /* Maverick F register */
5396 OP_RMD, /* Maverick D register */
5397 OP_RMFX, /* Maverick FX register */
5398 OP_RMDX, /* Maverick DX register */
5399 OP_RMAX, /* Maverick AX register */
5400 OP_RMDS, /* Maverick DSPSC register */
5401 OP_RIWR, /* iWMMXt wR register */
5402 OP_RIWC, /* iWMMXt wC register */
5403 OP_RIWG, /* iWMMXt wCG register */
5404 OP_RXA, /* XScale accumulator register */
5405
5406 OP_REGLST, /* ARM register list */
5407 OP_VRSLST, /* VFP single-precision register list */
5408 OP_VRDLST, /* VFP double-precision register list */
5409 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5410 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5411 OP_NSTRLST, /* Neon element/structure list */
5412
5413 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5414 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5415 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5416 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5417 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5418 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5419 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5420 OP_VMOV, /* Neon VMOV operands. */
5421 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5422 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5423 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5424
5425 OP_I0, /* immediate zero */
5426 OP_I7, /* immediate value 0 .. 7 */
5427 OP_I15, /* 0 .. 15 */
5428 OP_I16, /* 1 .. 16 */
5429 OP_I16z, /* 0 .. 16 */
5430 OP_I31, /* 0 .. 31 */
5431 OP_I31w, /* 0 .. 31, optional trailing ! */
5432 OP_I32, /* 1 .. 32 */
5433 OP_I32z, /* 0 .. 32 */
5434 OP_I63, /* 0 .. 63 */
5435 OP_I63s, /* -64 .. 63 */
5436 OP_I64, /* 1 .. 64 */
5437 OP_I64z, /* 0 .. 64 */
5438 OP_I255, /* 0 .. 255 */
5439
5440 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5441 OP_I7b, /* 0 .. 7 */
5442 OP_I15b, /* 0 .. 15 */
5443 OP_I31b, /* 0 .. 31 */
5444
5445 OP_SH, /* shifter operand */
5446 OP_SHG, /* shifter operand with possible group relocation */
5447 OP_ADDR, /* Memory address expression (any mode) */
5448 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5449 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5450 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5451 OP_EXP, /* arbitrary expression */
5452 OP_EXPi, /* same, with optional immediate prefix */
5453 OP_EXPr, /* same, with optional relocation suffix */
5454 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5455
5456 OP_CPSF, /* CPS flags */
5457 OP_ENDI, /* Endianness specifier */
5458 OP_PSR, /* CPSR/SPSR mask for msr */
5459 OP_COND, /* conditional code */
5460 OP_TB, /* Table branch. */
5461
5462 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5463 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5464
5465 OP_RRnpc_I0, /* ARM register or literal 0 */
5466 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5467 OP_RR_EXi, /* ARM register or expression with imm prefix */
5468 OP_RF_IF, /* FPA register or immediate */
5469 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5470 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5471
5472 /* Optional operands. */
5473 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5474 OP_oI31b, /* 0 .. 31 */
5475 OP_oI32b, /* 1 .. 32 */
5476 OP_oIffffb, /* 0 .. 65535 */
5477 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5478
5479 OP_oRR, /* ARM register */
5480 OP_oRRnpc, /* ARM register, not the PC */
5481 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5482 OP_oRND, /* Optional Neon double precision register */
5483 OP_oRNQ, /* Optional Neon quad precision register */
5484 OP_oRNDQ, /* Optional Neon double or quad precision register */
5485 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5486 OP_oSHll, /* LSL immediate */
5487 OP_oSHar, /* ASR immediate */
5488 OP_oSHllar, /* LSL or ASR immediate */
5489 OP_oROR, /* ROR 0/8/16/24 */
5490 OP_oBARRIER, /* Option argument for a barrier instruction. */
5491
5492 OP_FIRST_OPTIONAL = OP_oI7b
5493 };
5494
5495 /* Generic instruction operand parser. This does no encoding and no
5496 semantic validation; it merely squirrels values away in the inst
5497 structure. Returns SUCCESS or FAIL depending on whether the
5498 specified grammar matched. */
5499 static int
5500 parse_operands (char *str, const unsigned char *pattern)
5501 {
5502 unsigned const char *upat = pattern;
5503 char *backtrack_pos = 0;
5504 const char *backtrack_error = 0;
5505 int i, val, backtrack_index = 0;
5506 enum arm_reg_type rtype;
5507 parse_operand_result result;
5508
5509 #define po_char_or_fail(chr) do { \
5510 if (skip_past_char (&str, chr) == FAIL) \
5511 goto bad_args; \
5512 } while (0)
5513
5514 #define po_reg_or_fail(regtype) do { \
5515 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5516 &inst.operands[i].vectype); \
5517 if (val == FAIL) \
5518 { \
5519 first_error (_(reg_expected_msgs[regtype])); \
5520 goto failure; \
5521 } \
5522 inst.operands[i].reg = val; \
5523 inst.operands[i].isreg = 1; \
5524 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5525 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5526 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5527 || rtype == REG_TYPE_VFD \
5528 || rtype == REG_TYPE_NQ); \
5529 } while (0)
5530
5531 #define po_reg_or_goto(regtype, label) do { \
5532 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5533 &inst.operands[i].vectype); \
5534 if (val == FAIL) \
5535 goto label; \
5536 \
5537 inst.operands[i].reg = val; \
5538 inst.operands[i].isreg = 1; \
5539 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5540 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5541 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5542 || rtype == REG_TYPE_VFD \
5543 || rtype == REG_TYPE_NQ); \
5544 } while (0)
5545
5546 #define po_imm_or_fail(min, max, popt) do { \
5547 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5548 goto failure; \
5549 inst.operands[i].imm = val; \
5550 } while (0)
5551
5552 #define po_scalar_or_goto(elsz, label) do { \
5553 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5554 if (val == FAIL) \
5555 goto label; \
5556 inst.operands[i].reg = val; \
5557 inst.operands[i].isscalar = 1; \
5558 } while (0)
5559
5560 #define po_misc_or_fail(expr) do { \
5561 if (expr) \
5562 goto failure; \
5563 } while (0)
5564
5565 #define po_misc_or_fail_no_backtrack(expr) do { \
5566 result = expr; \
5567 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5568 backtrack_pos = 0; \
5569 if (result != PARSE_OPERAND_SUCCESS) \
5570 goto failure; \
5571 } while (0)
5572
5573 skip_whitespace (str);
5574
5575 for (i = 0; upat[i] != OP_stop; i++)
5576 {
5577 if (upat[i] >= OP_FIRST_OPTIONAL)
5578 {
5579 /* Remember where we are in case we need to backtrack. */
5580 assert (!backtrack_pos);
5581 backtrack_pos = str;
5582 backtrack_error = inst.error;
5583 backtrack_index = i;
5584 }
5585
5586 if (i > 0 && (i > 1 || inst.operands[0].present))
5587 po_char_or_fail (',');
5588
5589 switch (upat[i])
5590 {
5591 /* Registers */
5592 case OP_oRRnpc:
5593 case OP_RRnpc:
5594 case OP_oRR:
5595 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5596 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5597 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5598 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5599 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5600 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5601 case OP_oRND:
5602 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5603 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5604 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5605 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5606 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5607 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5608 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5609 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5610 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5611 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5612 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5613 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5614 case OP_oRNQ:
5615 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5616 case OP_oRNDQ:
5617 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5618 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5619 case OP_oRNSDQ:
5620 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5621
5622 /* Neon scalar. Using an element size of 8 means that some invalid
5623 scalars are accepted here, so deal with those in later code. */
5624 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5625
5626 /* WARNING: We can expand to two operands here. This has the potential
5627 to totally confuse the backtracking mechanism! It will be OK at
5628 least as long as we don't try to use optional args as well,
5629 though. */
5630 case OP_NILO:
5631 {
5632 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5633 inst.operands[i].present = 1;
5634 i++;
5635 skip_past_comma (&str);
5636 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5637 break;
5638 one_reg_only:
5639 /* Optional register operand was omitted. Unfortunately, it's in
5640 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5641 here (this is a bit grotty). */
5642 inst.operands[i] = inst.operands[i-1];
5643 inst.operands[i-1].present = 0;
5644 break;
5645 try_imm:
5646 /* There's a possibility of getting a 64-bit immediate here, so
5647 we need special handling. */
5648 if (parse_big_immediate (&str, i) == FAIL)
5649 {
5650 inst.error = _("immediate value is out of range");
5651 goto failure;
5652 }
5653 }
5654 break;
5655
5656 case OP_RNDQ_I0:
5657 {
5658 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5659 break;
5660 try_imm0:
5661 po_imm_or_fail (0, 0, TRUE);
5662 }
5663 break;
5664
5665 case OP_RVSD_I0:
5666 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5667 break;
5668
5669 case OP_RR_RNSC:
5670 {
5671 po_scalar_or_goto (8, try_rr);
5672 break;
5673 try_rr:
5674 po_reg_or_fail (REG_TYPE_RN);
5675 }
5676 break;
5677
5678 case OP_RNSDQ_RNSC:
5679 {
5680 po_scalar_or_goto (8, try_nsdq);
5681 break;
5682 try_nsdq:
5683 po_reg_or_fail (REG_TYPE_NSDQ);
5684 }
5685 break;
5686
5687 case OP_RNDQ_RNSC:
5688 {
5689 po_scalar_or_goto (8, try_ndq);
5690 break;
5691 try_ndq:
5692 po_reg_or_fail (REG_TYPE_NDQ);
5693 }
5694 break;
5695
5696 case OP_RND_RNSC:
5697 {
5698 po_scalar_or_goto (8, try_vfd);
5699 break;
5700 try_vfd:
5701 po_reg_or_fail (REG_TYPE_VFD);
5702 }
5703 break;
5704
5705 case OP_VMOV:
5706 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5707 not careful then bad things might happen. */
5708 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5709 break;
5710
5711 case OP_RNDQ_IMVNb:
5712 {
5713 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5714 break;
5715 try_mvnimm:
5716 /* There's a possibility of getting a 64-bit immediate here, so
5717 we need special handling. */
5718 if (parse_big_immediate (&str, i) == FAIL)
5719 {
5720 inst.error = _("immediate value is out of range");
5721 goto failure;
5722 }
5723 }
5724 break;
5725
5726 case OP_RNDQ_I63b:
5727 {
5728 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5729 break;
5730 try_shimm:
5731 po_imm_or_fail (0, 63, TRUE);
5732 }
5733 break;
5734
5735 case OP_RRnpcb:
5736 po_char_or_fail ('[');
5737 po_reg_or_fail (REG_TYPE_RN);
5738 po_char_or_fail (']');
5739 break;
5740
5741 case OP_RRw:
5742 case OP_oRRw:
5743 po_reg_or_fail (REG_TYPE_RN);
5744 if (skip_past_char (&str, '!') == SUCCESS)
5745 inst.operands[i].writeback = 1;
5746 break;
5747
5748 /* Immediates */
5749 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5750 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5751 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5752 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5753 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5754 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5755 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5756 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5757 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5758 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5759 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5760 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5761
5762 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5763 case OP_oI7b:
5764 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5765 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5766 case OP_oI31b:
5767 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5768 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5769 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5770
5771 /* Immediate variants */
5772 case OP_oI255c:
5773 po_char_or_fail ('{');
5774 po_imm_or_fail (0, 255, TRUE);
5775 po_char_or_fail ('}');
5776 break;
5777
5778 case OP_I31w:
5779 /* The expression parser chokes on a trailing !, so we have
5780 to find it first and zap it. */
5781 {
5782 char *s = str;
5783 while (*s && *s != ',')
5784 s++;
5785 if (s[-1] == '!')
5786 {
5787 s[-1] = '\0';
5788 inst.operands[i].writeback = 1;
5789 }
5790 po_imm_or_fail (0, 31, TRUE);
5791 if (str == s - 1)
5792 str = s;
5793 }
5794 break;
5795
5796 /* Expressions */
5797 case OP_EXPi: EXPi:
5798 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5799 GE_OPT_PREFIX));
5800 break;
5801
5802 case OP_EXP:
5803 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5804 GE_NO_PREFIX));
5805 break;
5806
5807 case OP_EXPr: EXPr:
5808 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5809 GE_NO_PREFIX));
5810 if (inst.reloc.exp.X_op == O_symbol)
5811 {
5812 val = parse_reloc (&str);
5813 if (val == -1)
5814 {
5815 inst.error = _("unrecognized relocation suffix");
5816 goto failure;
5817 }
5818 else if (val != BFD_RELOC_UNUSED)
5819 {
5820 inst.operands[i].imm = val;
5821 inst.operands[i].hasreloc = 1;
5822 }
5823 }
5824 break;
5825
5826 /* Operand for MOVW or MOVT. */
5827 case OP_HALF:
5828 po_misc_or_fail (parse_half (&str));
5829 break;
5830
5831 /* Register or expression */
5832 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5833 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5834
5835 /* Register or immediate */
5836 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5837 I0: po_imm_or_fail (0, 0, FALSE); break;
5838
5839 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5840 IF:
5841 if (!is_immediate_prefix (*str))
5842 goto bad_args;
5843 str++;
5844 val = parse_fpa_immediate (&str);
5845 if (val == FAIL)
5846 goto failure;
5847 /* FPA immediates are encoded as registers 8-15.
5848 parse_fpa_immediate has already applied the offset. */
5849 inst.operands[i].reg = val;
5850 inst.operands[i].isreg = 1;
5851 break;
5852
5853 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5854 I32z: po_imm_or_fail (0, 32, FALSE); break;
5855
5856 /* Two kinds of register */
5857 case OP_RIWR_RIWC:
5858 {
5859 struct reg_entry *rege = arm_reg_parse_multi (&str);
5860 if (!rege
5861 || (rege->type != REG_TYPE_MMXWR
5862 && rege->type != REG_TYPE_MMXWC
5863 && rege->type != REG_TYPE_MMXWCG))
5864 {
5865 inst.error = _("iWMMXt data or control register expected");
5866 goto failure;
5867 }
5868 inst.operands[i].reg = rege->number;
5869 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5870 }
5871 break;
5872
5873 case OP_RIWC_RIWG:
5874 {
5875 struct reg_entry *rege = arm_reg_parse_multi (&str);
5876 if (!rege
5877 || (rege->type != REG_TYPE_MMXWC
5878 && rege->type != REG_TYPE_MMXWCG))
5879 {
5880 inst.error = _("iWMMXt control register expected");
5881 goto failure;
5882 }
5883 inst.operands[i].reg = rege->number;
5884 inst.operands[i].isreg = 1;
5885 }
5886 break;
5887
5888 /* Misc */
5889 case OP_CPSF: val = parse_cps_flags (&str); break;
5890 case OP_ENDI: val = parse_endian_specifier (&str); break;
5891 case OP_oROR: val = parse_ror (&str); break;
5892 case OP_PSR: val = parse_psr (&str); break;
5893 case OP_COND: val = parse_cond (&str); break;
5894 case OP_oBARRIER:val = parse_barrier (&str); break;
5895
5896 case OP_RVC_PSR:
5897 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5898 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5899 break;
5900 try_psr:
5901 val = parse_psr (&str);
5902 break;
5903
5904 case OP_APSR_RR:
5905 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5906 break;
5907 try_apsr:
5908 /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
5909 instruction). */
5910 if (strncasecmp (str, "APSR_", 5) == 0)
5911 {
5912 unsigned found = 0;
5913 str += 5;
5914 while (found < 15)
5915 switch (*str++)
5916 {
5917 case 'c': found = (found & 1) ? 16 : found | 1; break;
5918 case 'n': found = (found & 2) ? 16 : found | 2; break;
5919 case 'z': found = (found & 4) ? 16 : found | 4; break;
5920 case 'v': found = (found & 8) ? 16 : found | 8; break;
5921 default: found = 16;
5922 }
5923 if (found != 15)
5924 goto failure;
5925 inst.operands[i].isvec = 1;
5926 }
5927 else
5928 goto failure;
5929 break;
5930
5931 case OP_TB:
5932 po_misc_or_fail (parse_tb (&str));
5933 break;
5934
5935 /* Register lists */
5936 case OP_REGLST:
5937 val = parse_reg_list (&str);
5938 if (*str == '^')
5939 {
5940 inst.operands[1].writeback = 1;
5941 str++;
5942 }
5943 break;
5944
5945 case OP_VRSLST:
5946 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5947 break;
5948
5949 case OP_VRDLST:
5950 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5951 break;
5952
5953 case OP_VRSDLST:
5954 /* Allow Q registers too. */
5955 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5956 REGLIST_NEON_D);
5957 if (val == FAIL)
5958 {
5959 inst.error = NULL;
5960 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5961 REGLIST_VFP_S);
5962 inst.operands[i].issingle = 1;
5963 }
5964 break;
5965
5966 case OP_NRDLST:
5967 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5968 REGLIST_NEON_D);
5969 break;
5970
5971 case OP_NSTRLST:
5972 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5973 &inst.operands[i].vectype);
5974 break;
5975
5976 /* Addressing modes */
5977 case OP_ADDR:
5978 po_misc_or_fail (parse_address (&str, i));
5979 break;
5980
5981 case OP_ADDRGLDR:
5982 po_misc_or_fail_no_backtrack (
5983 parse_address_group_reloc (&str, i, GROUP_LDR));
5984 break;
5985
5986 case OP_ADDRGLDRS:
5987 po_misc_or_fail_no_backtrack (
5988 parse_address_group_reloc (&str, i, GROUP_LDRS));
5989 break;
5990
5991 case OP_ADDRGLDC:
5992 po_misc_or_fail_no_backtrack (
5993 parse_address_group_reloc (&str, i, GROUP_LDC));
5994 break;
5995
5996 case OP_SH:
5997 po_misc_or_fail (parse_shifter_operand (&str, i));
5998 break;
5999
6000 case OP_SHG:
6001 po_misc_or_fail_no_backtrack (
6002 parse_shifter_operand_group_reloc (&str, i));
6003 break;
6004
6005 case OP_oSHll:
6006 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6007 break;
6008
6009 case OP_oSHar:
6010 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6011 break;
6012
6013 case OP_oSHllar:
6014 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6015 break;
6016
6017 default:
6018 as_fatal ("unhandled operand code %d", upat[i]);
6019 }
6020
6021 /* Various value-based sanity checks and shared operations. We
6022 do not signal immediate failures for the register constraints;
6023 this allows a syntax error to take precedence. */
6024 switch (upat[i])
6025 {
6026 case OP_oRRnpc:
6027 case OP_RRnpc:
6028 case OP_RRnpcb:
6029 case OP_RRw:
6030 case OP_oRRw:
6031 case OP_RRnpc_I0:
6032 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6033 inst.error = BAD_PC;
6034 break;
6035
6036 case OP_CPSF:
6037 case OP_ENDI:
6038 case OP_oROR:
6039 case OP_PSR:
6040 case OP_RVC_PSR:
6041 case OP_COND:
6042 case OP_oBARRIER:
6043 case OP_REGLST:
6044 case OP_VRSLST:
6045 case OP_VRDLST:
6046 case OP_VRSDLST:
6047 case OP_NRDLST:
6048 case OP_NSTRLST:
6049 if (val == FAIL)
6050 goto failure;
6051 inst.operands[i].imm = val;
6052 break;
6053
6054 default:
6055 break;
6056 }
6057
6058 /* If we get here, this operand was successfully parsed. */
6059 inst.operands[i].present = 1;
6060 continue;
6061
6062 bad_args:
6063 inst.error = BAD_ARGS;
6064
6065 failure:
6066 if (!backtrack_pos)
6067 {
6068 /* The parse routine should already have set inst.error, but set a
6069 default here just in case. */
6070 if (!inst.error)
6071 inst.error = _("syntax error");
6072 return FAIL;
6073 }
6074
6075 /* Do not backtrack over a trailing optional argument that
6076 absorbed some text. We will only fail again, with the
6077 'garbage following instruction' error message, which is
6078 probably less helpful than the current one. */
6079 if (backtrack_index == i && backtrack_pos != str
6080 && upat[i+1] == OP_stop)
6081 {
6082 if (!inst.error)
6083 inst.error = _("syntax error");
6084 return FAIL;
6085 }
6086
6087 /* Try again, skipping the optional argument at backtrack_pos. */
6088 str = backtrack_pos;
6089 inst.error = backtrack_error;
6090 inst.operands[backtrack_index].present = 0;
6091 i = backtrack_index;
6092 backtrack_pos = 0;
6093 }
6094
6095 /* Check that we have parsed all the arguments. */
6096 if (*str != '\0' && !inst.error)
6097 inst.error = _("garbage following instruction");
6098
6099 return inst.error ? FAIL : SUCCESS;
6100 }
6101
6102 #undef po_char_or_fail
6103 #undef po_reg_or_fail
6104 #undef po_reg_or_goto
6105 #undef po_imm_or_fail
6106 #undef po_scalar_or_goto
6107 \f
6108 /* Shorthand macro for instruction encoding functions issuing errors. */
6109 #define constraint(expr, err) do { \
6110 if (expr) \
6111 { \
6112 inst.error = err; \
6113 return; \
6114 } \
6115 } while (0)
6116
6117 /* Functions for operand encoding. ARM, then Thumb. */
6118
6119 #define rotate_left(v, n) (v << n | v >> (32 - n))
6120
6121 /* If VAL can be encoded in the immediate field of an ARM instruction,
6122 return the encoded form. Otherwise, return FAIL. */
6123
6124 static unsigned int
6125 encode_arm_immediate (unsigned int val)
6126 {
6127 unsigned int a, i;
6128
6129 for (i = 0; i < 32; i += 2)
6130 if ((a = rotate_left (val, i)) <= 0xff)
6131 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6132
6133 return FAIL;
6134 }
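
/* Worked examples (illustrative): 0x000000ff is already <= 0xff and encodes
   as 0x0ff (rotation 0); 0xff000000 rotates left by 8 to 0xff and encodes as
   0xff | (8 << 7) = 0x4ff, i.e. imm8=0xff with rotation field 4 (rotate
   right by 8) in bits 11-8; 0x00000101 does not fit any 8-bit window and
   yields FAIL.  */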
6135
6136 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6137 return the encoded form. Otherwise, return FAIL. */
6138 static unsigned int
6139 encode_thumb32_immediate (unsigned int val)
6140 {
6141 unsigned int a, i;
6142
6143 if (val <= 0xff)
6144 return val;
6145
6146 for (i = 1; i <= 24; i++)
6147 {
6148 a = val >> i;
6149 if ((val & ~(0xff << i)) == 0)
6150 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6151 }
6152
6153 a = val & 0xff;
6154 if (val == ((a << 16) | a))
6155 return 0x100 | a;
6156 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6157 return 0x300 | a;
6158
6159 a = val & 0xff00;
6160 if (val == ((a << 16) | a))
6161 return 0x200 | (a >> 8);
6162
6163 return FAIL;
6164 }
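
/* Worked examples of the replicated-byte cases above (illustrative):
   0x00120012 -> 0x112, 0x12001200 -> 0x212, 0x12121212 -> 0x312; a plain
   0x00000034 encodes as itself, and 0x12345678 returns FAIL.  */
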
6165 /* Encode a VFP SP or DP register number into inst.instruction. */
6166
6167 static void
6168 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6169 {
6170 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6171 && reg > 15)
6172 {
6173 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6174 {
6175 if (thumb_mode)
6176 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6177 fpu_vfp_ext_v3);
6178 else
6179 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6180 fpu_vfp_ext_v3);
6181 }
6182 else
6183 {
6184 first_error (_("D register out of range for selected VFP version"));
6185 return;
6186 }
6187 }
6188
6189 switch (pos)
6190 {
6191 case VFP_REG_Sd:
6192 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6193 break;
6194
6195 case VFP_REG_Sn:
6196 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6197 break;
6198
6199 case VFP_REG_Sm:
6200 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6201 break;
6202
6203 case VFP_REG_Dd:
6204 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6205 break;
6206
6207 case VFP_REG_Dn:
6208 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6209 break;
6210
6211 case VFP_REG_Dm:
6212 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6213 break;
6214
6215 default:
6216 abort ();
6217 }
6218 }
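
/* For instance (illustrative): s3 in the Sd position contributes
   ((3 >> 1) << 12) | ((3 & 1) << 22) = 0x00401000 -- the low (odd/even) bit
   of a single-precision register lands in bit 22; for d17 in the Dd position
   the same bit 22 instead carries the high bit of the register number, which
   is why D registers above 15 require VFPv3 in the check above.  */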
6219
6220 /* Encode a <shift> in an ARM-format instruction. The immediate,
6221 if any, is handled by md_apply_fix. */
6222 static void
6223 encode_arm_shift (int i)
6224 {
6225 if (inst.operands[i].shift_kind == SHIFT_RRX)
6226 inst.instruction |= SHIFT_ROR << 5;
6227 else
6228 {
6229 inst.instruction |= inst.operands[i].shift_kind << 5;
6230 if (inst.operands[i].immisreg)
6231 {
6232 inst.instruction |= SHIFT_BY_REG;
6233 inst.instruction |= inst.operands[i].imm << 8;
6234 }
6235 else
6236 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6237 }
6238 }
6239
6240 static void
6241 encode_arm_shifter_operand (int i)
6242 {
6243 if (inst.operands[i].isreg)
6244 {
6245 inst.instruction |= inst.operands[i].reg;
6246 encode_arm_shift (i);
6247 }
6248 else
6249 inst.instruction |= INST_IMMEDIATE;
6250 }
6251
6252 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6253 static void
6254 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6255 {
6256 assert (inst.operands[i].isreg);
6257 inst.instruction |= inst.operands[i].reg << 16;
6258
6259 if (inst.operands[i].preind)
6260 {
6261 if (is_t)
6262 {
6263 inst.error = _("instruction does not accept preindexed addressing");
6264 return;
6265 }
6266 inst.instruction |= PRE_INDEX;
6267 if (inst.operands[i].writeback)
6268 inst.instruction |= WRITE_BACK;
6269
6270 }
6271 else if (inst.operands[i].postind)
6272 {
6273 assert (inst.operands[i].writeback);
6274 if (is_t)
6275 inst.instruction |= WRITE_BACK;
6276 }
6277 else /* unindexed - only for coprocessor */
6278 {
6279 inst.error = _("instruction does not accept unindexed addressing");
6280 return;
6281 }
6282
6283 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6284 && (((inst.instruction & 0x000f0000) >> 16)
6285 == ((inst.instruction & 0x0000f000) >> 12)))
6286 as_warn ((inst.instruction & LOAD_BIT)
6287 ? _("destination register same as write-back base")
6288 : _("source register same as write-back base"));
6289 }
6290
6291 /* inst.operands[i] was set up by parse_address. Encode it into an
6292 ARM-format mode 2 load or store instruction. If is_t is true,
6293 reject forms that cannot be used with a T instruction (i.e. not
6294 post-indexed). */
6295 static void
6296 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6297 {
6298 encode_arm_addr_mode_common (i, is_t);
6299
6300 if (inst.operands[i].immisreg)
6301 {
6302 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6303 inst.instruction |= inst.operands[i].imm;
6304 if (!inst.operands[i].negative)
6305 inst.instruction |= INDEX_UP;
6306 if (inst.operands[i].shifted)
6307 {
6308 if (inst.operands[i].shift_kind == SHIFT_RRX)
6309 inst.instruction |= SHIFT_ROR << 5;
6310 else
6311 {
6312 inst.instruction |= inst.operands[i].shift_kind << 5;
6313 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6314 }
6315 }
6316 }
6317 else /* immediate offset in inst.reloc */
6318 {
6319 if (inst.reloc.type == BFD_RELOC_UNUSED)
6320 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6321 }
6322 }
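
/* Editorial example: for "ldr r0, [r1, r2]" the register offset sets
   INST_IMMEDIATE (inverted sense, as noted above) with r2 in bits 3:0
   and the U bit set; writing "[r1, -r2]" leaves INDEX_UP clear.  A
   constant offset such as "[r1, #4]" is instead left to the
   BFD_RELOC_ARM_OFFSET_IMM fixup.  */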
6323
6324 /* inst.operands[i] was set up by parse_address. Encode it into an
6325 ARM-format mode 3 load or store instruction. Reject forms that
6326 cannot be used with such instructions. If is_t is true, reject
6327 forms that cannot be used with a T instruction (i.e. not
6328 post-indexed). */
6329 static void
6330 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6331 {
6332 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6333 {
6334 inst.error = _("instruction does not accept scaled register index");
6335 return;
6336 }
6337
6338 encode_arm_addr_mode_common (i, is_t);
6339
6340 if (inst.operands[i].immisreg)
6341 {
6342 inst.instruction |= inst.operands[i].imm;
6343 if (!inst.operands[i].negative)
6344 inst.instruction |= INDEX_UP;
6345 }
6346 else /* immediate offset in inst.reloc */
6347 {
6348 inst.instruction |= HWOFFSET_IMM;
6349 if (inst.reloc.type == BFD_RELOC_UNUSED)
6350 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6351 }
6352 }
6353
6354 /* inst.operands[i] was set up by parse_address. Encode it into an
6355 ARM-format instruction. Reject all forms which cannot be encoded
6356 into a coprocessor load/store instruction. If wb_ok is false,
6357 reject use of writeback; if unind_ok is false, reject use of
6358 unindexed addressing. If reloc_override is not 0, use it instead
6359 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6360 (in which case it is preserved). */
6361
6362 static int
6363 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6364 {
6365 inst.instruction |= inst.operands[i].reg << 16;
6366
6367 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6368
6369 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6370 {
6371 assert (!inst.operands[i].writeback);
6372 if (!unind_ok)
6373 {
6374 inst.error = _("instruction does not support unindexed addressing");
6375 return FAIL;
6376 }
6377 inst.instruction |= inst.operands[i].imm;
6378 inst.instruction |= INDEX_UP;
6379 return SUCCESS;
6380 }
6381
6382 if (inst.operands[i].preind)
6383 inst.instruction |= PRE_INDEX;
6384
6385 if (inst.operands[i].writeback)
6386 {
6387 if (inst.operands[i].reg == REG_PC)
6388 {
6389 inst.error = _("pc may not be used with write-back");
6390 return FAIL;
6391 }
6392 if (!wb_ok)
6393 {
6394 inst.error = _("instruction does not support writeback");
6395 return FAIL;
6396 }
6397 inst.instruction |= WRITE_BACK;
6398 }
6399
6400 if (reloc_override)
6401 inst.reloc.type = reloc_override;
6402 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6403 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6404 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6405 {
6406 if (thumb_mode)
6407 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6408 else
6409 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6410 }
6411
6412 return SUCCESS;
6413 }
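
/* Editorial example: for a pre-indexed coprocessor address such as
   "[r4, #-24]!" this sets PRE_INDEX and WRITE_BACK, puts r4 in bits
   19:16, and leaves the offset to the BFD_RELOC_ARM_CP_OFF_IMM fixup
   (or its Thumb-2 counterpart); write-back on pc, or anywhere wb_ok
   is false, is rejected as shown above.  */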
6414
6415 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6416 Determine whether it can be performed with a move instruction; if
6417 it can, convert inst.instruction to that move instruction and
6418 return 1; if it can't, convert inst.instruction to a literal-pool
6419 load and return 0. If this is not a valid thing to do in the
6420 current context, set inst.error and return 1.
6421
6422 inst.operands[i] describes the destination register. */
6423
6424 static int
6425 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6426 {
6427 unsigned long tbit;
6428
6429 if (thumb_p)
6430 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6431 else
6432 tbit = LOAD_BIT;
6433
6434 if ((inst.instruction & tbit) == 0)
6435 {
6436 inst.error = _("invalid pseudo operation");
6437 return 1;
6438 }
6439 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6440 {
6441 inst.error = _("constant expression expected");
6442 return 1;
6443 }
6444 if (inst.reloc.exp.X_op == O_constant)
6445 {
6446 if (thumb_p)
6447 {
6448 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6449 {
6450 /* This can be done with a mov(1) instruction. */
6451 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6452 inst.instruction |= inst.reloc.exp.X_add_number;
6453 return 1;
6454 }
6455 }
6456 else
6457 {
6458 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6459 if (value != FAIL)
6460 {
6461 /* This can be done with a mov instruction. */
6462 inst.instruction &= LITERAL_MASK;
6463 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6464 inst.instruction |= value & 0xfff;
6465 return 1;
6466 }
6467
6468 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6469 if (value != FAIL)
6470 {
6471 /* This can be done with a mvn instruction. */
6472 inst.instruction &= LITERAL_MASK;
6473 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6474 inst.instruction |= value & 0xfff;
6475 return 1;
6476 }
6477 }
6478 }
6479
6480 if (add_to_lit_pool () == FAIL)
6481 {
6482 inst.error = _("literal pool insertion failed");
6483 return 1;
6484 }
6485 inst.operands[1].reg = REG_PC;
6486 inst.operands[1].isreg = 1;
6487 inst.operands[1].preind = 1;
6488 inst.reloc.pc_rel = 1;
6489 inst.reloc.type = (thumb_p
6490 ? BFD_RELOC_ARM_THUMB_OFFSET
6491 : (mode_3
6492 ? BFD_RELOC_ARM_HWLITERAL
6493 : BFD_RELOC_ARM_LITERAL));
6494 return 0;
6495 }
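
/* Editorial examples of the behaviour above: "ldr r0, =0xFF00" becomes
   "mov r0, #0xFF00" because the constant is a valid ARM immediate;
   "ldr r0, =0xFFFFFF00" becomes "mvn r0, #0xFF"; a constant that fits
   neither form, e.g. "ldr r0, =0x12345678", is placed in the literal
   pool and loaded with a pc-relative ldr via BFD_RELOC_ARM_LITERAL.  */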
6496
6497 /* Functions for instruction encoding, sorted by subarchitecture.
6498 First some generics; their names are taken from the conventional
6499 bit positions for register arguments in ARM format instructions. */
6500
6501 static void
6502 do_noargs (void)
6503 {
6504 }
6505
6506 static void
6507 do_rd (void)
6508 {
6509 inst.instruction |= inst.operands[0].reg << 12;
6510 }
6511
6512 static void
6513 do_rd_rm (void)
6514 {
6515 inst.instruction |= inst.operands[0].reg << 12;
6516 inst.instruction |= inst.operands[1].reg;
6517 }
6518
6519 static void
6520 do_rd_rn (void)
6521 {
6522 inst.instruction |= inst.operands[0].reg << 12;
6523 inst.instruction |= inst.operands[1].reg << 16;
6524 }
6525
6526 static void
6527 do_rn_rd (void)
6528 {
6529 inst.instruction |= inst.operands[0].reg << 16;
6530 inst.instruction |= inst.operands[1].reg << 12;
6531 }
6532
6533 static void
6534 do_rd_rm_rn (void)
6535 {
6536 unsigned Rn = inst.operands[2].reg;
6537 /* Enforce restrictions on SWP instruction. */
6538 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6539 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6540 _("Rn must not overlap other operands"));
6541 inst.instruction |= inst.operands[0].reg << 12;
6542 inst.instruction |= inst.operands[1].reg;
6543 inst.instruction |= Rn << 16;
6544 }
6545
6546 static void
6547 do_rd_rn_rm (void)
6548 {
6549 inst.instruction |= inst.operands[0].reg << 12;
6550 inst.instruction |= inst.operands[1].reg << 16;
6551 inst.instruction |= inst.operands[2].reg;
6552 }
6553
6554 static void
6555 do_rm_rd_rn (void)
6556 {
6557 inst.instruction |= inst.operands[0].reg;
6558 inst.instruction |= inst.operands[1].reg << 12;
6559 inst.instruction |= inst.operands[2].reg << 16;
6560 }
6561
6562 static void
6563 do_imm0 (void)
6564 {
6565 inst.instruction |= inst.operands[0].imm;
6566 }
6567
6568 static void
6569 do_rd_cpaddr (void)
6570 {
6571 inst.instruction |= inst.operands[0].reg << 12;
6572 encode_arm_cp_address (1, TRUE, TRUE, 0);
6573 }
6574
6575 /* ARM instructions, in alphabetical order by function name (except
6576 that wrapper functions appear immediately after the function they
6577 wrap). */
6578
6579 /* This is a pseudo-op of the form "adr rd, label" to be converted
6580 into a relative address of the form "add rd, pc, #label-.-8". */
6581
6582 static void
6583 do_adr (void)
6584 {
6585 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6586
6587 /* Frag hacking will turn this into a sub instruction if the offset turns
6588 out to be negative. */
6589 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6590 inst.reloc.pc_rel = 1;
6591 inst.reloc.exp.X_add_number -= 8;
6592 }
6593
6594 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6595 into a relative address of the form:
6596 add rd, pc, #low(label-.-8)
6597 add rd, rd, #high(label-.-8)  */
6598
6599 static void
6600 do_adrl (void)
6601 {
6602 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6603
6604 /* Frag hacking will turn this into a sub instruction if the offset turns
6605 out to be negative. */
6606 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6607 inst.reloc.pc_rel = 1;
6608 inst.size = INSN_SIZE * 2;
6609 inst.reloc.exp.X_add_number -= 8;
6610 }
6611
6612 static void
6613 do_arit (void)
6614 {
6615 if (!inst.operands[1].present)
6616 inst.operands[1].reg = inst.operands[0].reg;
6617 inst.instruction |= inst.operands[0].reg << 12;
6618 inst.instruction |= inst.operands[1].reg << 16;
6619 encode_arm_shifter_operand (2);
6620 }
6621
6622 static void
6623 do_barrier (void)
6624 {
6625 if (inst.operands[0].present)
6626 {
6627 constraint ((inst.instruction & 0xf0) != 0x40
6628 && inst.operands[0].imm != 0xf,
6629 "bad barrier type");
6630 inst.instruction |= inst.operands[0].imm;
6631 }
6632 else
6633 inst.instruction |= 0xf;
6634 }
6635
6636 static void
6637 do_bfc (void)
6638 {
6639 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6640 constraint (msb > 32, _("bit-field extends past end of register"));
6641 /* The instruction encoding stores the LSB and MSB,
6642 not the LSB and width. */
6643 inst.instruction |= inst.operands[0].reg << 12;
6644 inst.instruction |= inst.operands[1].imm << 7;
6645 inst.instruction |= (msb - 1) << 16;
6646 }
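
/* Editorial example: "bfc r0, #4, #8" is written as lsb = 4 and width
   = 8, so the encoding above stores 4 in the lsb field (bits 11:7) and
   4 + 8 - 1 = 11 in the msb field (bits 20:16).  */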
6647
6648 static void
6649 do_bfi (void)
6650 {
6651 unsigned int msb;
6652
6653 /* #0 in second position is alternative syntax for bfc, which is
6654 the same instruction but with REG_PC in the Rm field. */
6655 if (!inst.operands[1].isreg)
6656 inst.operands[1].reg = REG_PC;
6657
6658 msb = inst.operands[2].imm + inst.operands[3].imm;
6659 constraint (msb > 32, _("bit-field extends past end of register"));
6660 /* The instruction encoding stores the LSB and MSB,
6661 not the LSB and width. */
6662 inst.instruction |= inst.operands[0].reg << 12;
6663 inst.instruction |= inst.operands[1].reg;
6664 inst.instruction |= inst.operands[2].imm << 7;
6665 inst.instruction |= (msb - 1) << 16;
6666 }
6667
6668 static void
6669 do_bfx (void)
6670 {
6671 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6672 _("bit-field extends past end of register"));
6673 inst.instruction |= inst.operands[0].reg << 12;
6674 inst.instruction |= inst.operands[1].reg;
6675 inst.instruction |= inst.operands[2].imm << 7;
6676 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6677 }
6678
6679 /* ARM V5 breakpoint instruction (argument parse)
6680 BKPT <16 bit unsigned immediate>
6681 Instruction is not conditional.
6682 The bit pattern given in insns[] has the COND_ALWAYS condition,
6683 and it is an error if the caller tried to override that. */
6684
6685 static void
6686 do_bkpt (void)
6687 {
6688 /* Top 12 of 16 bits to bits 19:8. */
6689 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6690
6691 /* Bottom 4 of 16 bits to bits 3:0. */
6692 inst.instruction |= inst.operands[0].imm & 0xf;
6693 }
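
/* Editorial example: "bkpt 0xABCD" places 0xABC in bits 19:8 (via the
   << 4 above) and 0xD in bits 3:0, reassembling the 16-bit immediate
   around the fixed 0x7 in bits 7:4 of the BKPT encoding.  */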
6694
6695 static void
6696 encode_branch (int default_reloc)
6697 {
6698 if (inst.operands[0].hasreloc)
6699 {
6700 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6701 _("the only suffix valid here is '(plt)'"));
6702 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6703 }
6704 else
6705 {
6706 inst.reloc.type = default_reloc;
6707 }
6708 inst.reloc.pc_rel = 1;
6709 }
6710
6711 static void
6712 do_branch (void)
6713 {
6714 #ifdef OBJ_ELF
6715 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6716 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6717 else
6718 #endif
6719 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6720 }
6721
6722 static void
6723 do_bl (void)
6724 {
6725 #ifdef OBJ_ELF
6726 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6727 {
6728 if (inst.cond == COND_ALWAYS)
6729 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6730 else
6731 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6732 }
6733 else
6734 #endif
6735 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6736 }
6737
6738 /* ARM V5 branch-link-exchange instruction (argument parse)
6739 BLX <target_addr> ie BLX(1)
6740 BLX{<condition>} <Rm> ie BLX(2)
6741 Unfortunately, there are two different opcodes for this mnemonic.
6742 So, the insns[].value is not used, and the code here zaps values
6743 into inst.instruction.
6744 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6745
6746 static void
6747 do_blx (void)
6748 {
6749 if (inst.operands[0].isreg)
6750 {
6751 /* Arg is a register; the opcode provided by insns[] is correct.
6752 It is not illegal to do "blx pc", just useless. */
6753 if (inst.operands[0].reg == REG_PC)
6754 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6755
6756 inst.instruction |= inst.operands[0].reg;
6757 }
6758 else
6759 {
6760 /* Arg is an address; this instruction cannot be executed
6761 conditionally, and the opcode must be adjusted. */
6762 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6763 inst.instruction = 0xfa000000;
6764 #ifdef OBJ_ELF
6765 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6766 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6767 else
6768 #endif
6769 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6770 }
6771 }
6772
6773 static void
6774 do_bx (void)
6775 {
6776 if (inst.operands[0].reg == REG_PC)
6777 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6778
6779 inst.instruction |= inst.operands[0].reg;
6780 }
6781
6782
6783 /* ARM v5TEJ. Jump to Jazelle code. */
6784
6785 static void
6786 do_bxj (void)
6787 {
6788 if (inst.operands[0].reg == REG_PC)
6789 as_tsktsk (_("use of r15 in bxj is not really useful"));
6790
6791 inst.instruction |= inst.operands[0].reg;
6792 }
6793
6794 /* Co-processor data operation:
6795 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6796 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6797 static void
6798 do_cdp (void)
6799 {
6800 inst.instruction |= inst.operands[0].reg << 8;
6801 inst.instruction |= inst.operands[1].imm << 20;
6802 inst.instruction |= inst.operands[2].reg << 12;
6803 inst.instruction |= inst.operands[3].reg << 16;
6804 inst.instruction |= inst.operands[4].reg;
6805 inst.instruction |= inst.operands[5].imm << 5;
6806 }
6807
6808 static void
6809 do_cmp (void)
6810 {
6811 inst.instruction |= inst.operands[0].reg << 16;
6812 encode_arm_shifter_operand (1);
6813 }
6814
6815 /* Transfer between coprocessor and ARM registers.
6816 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6817 MRC2
6818 MCR{cond}
6819 MCR2
6820
6821 No special properties. */
6822
6823 static void
6824 do_co_reg (void)
6825 {
6826 inst.instruction |= inst.operands[0].reg << 8;
6827 inst.instruction |= inst.operands[1].imm << 21;
6828 inst.instruction |= inst.operands[2].reg << 12;
6829 inst.instruction |= inst.operands[3].reg << 16;
6830 inst.instruction |= inst.operands[4].reg;
6831 inst.instruction |= inst.operands[5].imm << 5;
6832 }
6833
6834 /* Transfer between coprocessor register and pair of ARM registers.
6835 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6836 MCRR2
6837 MRRC{cond}
6838 MRRC2
6839
6840 Two XScale instructions are special cases of these:
6841
6842 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6843 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6844
6845 Result unpredictable if Rd or Rn is R15. */
6846
6847 static void
6848 do_co_reg2c (void)
6849 {
6850 inst.instruction |= inst.operands[0].reg << 8;
6851 inst.instruction |= inst.operands[1].imm << 4;
6852 inst.instruction |= inst.operands[2].reg << 12;
6853 inst.instruction |= inst.operands[3].reg << 16;
6854 inst.instruction |= inst.operands[4].reg;
6855 }
6856
6857 static void
6858 do_cpsi (void)
6859 {
6860 inst.instruction |= inst.operands[0].imm << 6;
6861 if (inst.operands[1].present)
6862 {
6863 inst.instruction |= CPSI_MMOD;
6864 inst.instruction |= inst.operands[1].imm;
6865 }
6866 }
6867
6868 static void
6869 do_dbg (void)
6870 {
6871 inst.instruction |= inst.operands[0].imm;
6872 }
6873
6874 static void
6875 do_it (void)
6876 {
6877 /* There is no IT instruction in ARM mode. We
6878 process it but do not generate code for it. */
6879 inst.size = 0;
6880 }
6881
6882 static void
6883 do_ldmstm (void)
6884 {
6885 int base_reg = inst.operands[0].reg;
6886 int range = inst.operands[1].imm;
6887
6888 inst.instruction |= base_reg << 16;
6889 inst.instruction |= range;
6890
6891 if (inst.operands[1].writeback)
6892 inst.instruction |= LDM_TYPE_2_OR_3;
6893
6894 if (inst.operands[0].writeback)
6895 {
6896 inst.instruction |= WRITE_BACK;
6897 /* Check for unpredictable uses of writeback. */
6898 if (inst.instruction & LOAD_BIT)
6899 {
6900 /* Not allowed in LDM type 2. */
6901 if ((inst.instruction & LDM_TYPE_2_OR_3)
6902 && ((range & (1 << REG_PC)) == 0))
6903 as_warn (_("writeback of base register is UNPREDICTABLE"));
6904 /* Only allowed if base reg not in list for other types. */
6905 else if (range & (1 << base_reg))
6906 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6907 }
6908 else /* STM. */
6909 {
6910 /* Not allowed for type 2. */
6911 if (inst.instruction & LDM_TYPE_2_OR_3)
6912 as_warn (_("writeback of base register is UNPREDICTABLE"));
6913 /* Only allowed if base reg not in list, or first in list. */
6914 else if ((range & (1 << base_reg))
6915 && (range & ((1 << base_reg) - 1)))
6916 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6917 }
6918 }
6919 }
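
/* Editorial examples of the checks above: "ldmia r3!, {r2, r3, r4}"
   draws the "writeback of base register when in register list" warning
   because the loaded list contains the written-back base, while
   "stmia r3!, {r3, r4}" is accepted silently since r3 is the lowest
   register in the stored list.  */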
6920
6921 /* ARMv5TE load-consecutive (argument parse)
6922 Mode is like LDRH.
6923
6924 LDRccD R, mode
6925 STRccD R, mode. */
6926
6927 static void
6928 do_ldrd (void)
6929 {
6930 constraint (inst.operands[0].reg % 2 != 0,
6931 _("first destination register must be even"));
6932 constraint (inst.operands[1].present
6933 && inst.operands[1].reg != inst.operands[0].reg + 1,
6934 _("can only load two consecutive registers"));
6935 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6936 constraint (!inst.operands[2].isreg, _("'[' expected"));
6937
6938 if (!inst.operands[1].present)
6939 inst.operands[1].reg = inst.operands[0].reg + 1;
6940
6941 if (inst.instruction & LOAD_BIT)
6942 {
6943 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6944 register and the first register written; we have to diagnose
6945 overlap between the base and the second register written here. */
6946
6947 if (inst.operands[2].reg == inst.operands[1].reg
6948 && (inst.operands[2].writeback || inst.operands[2].postind))
6949 as_warn (_("base register written back, and overlaps "
6950 "second destination register"));
6951
6952 /* For an index-register load, the index register must not overlap the
6953 destination (even if not write-back). */
6954 else if (inst.operands[2].immisreg
6955 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6956 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6957 as_warn (_("index register overlaps destination register"));
6958 }
6959
6960 inst.instruction |= inst.operands[0].reg << 12;
6961 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6962 }
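
/* Editorial example: "ldrd r2, [r3]!" implicitly loads r2 and r3, so
   the written-back base r3 overlaps the second destination and the
   warning above is given; "ldrd r2, [r4], r3" warns instead that the
   index register r3 overlaps a destination register.  */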
6963
6964 static void
6965 do_ldrex (void)
6966 {
6967 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6968 || inst.operands[1].postind || inst.operands[1].writeback
6969 || inst.operands[1].immisreg || inst.operands[1].shifted
6970 || inst.operands[1].negative
6971 /* This can arise if the programmer has written
6972 strex rN, rM, foo
6973 or if they have mistakenly used a register name as the last
6974 operand, e.g.:
6975 strex rN, rM, rX
6976 It is very difficult to distinguish between these two cases
6977 because "rX" might actually be a label. ie the register
6978 name has been occluded by a symbol of the same name. So we
6979 just generate a general 'bad addressing mode' type error
6980 message and leave it up to the programmer to discover the
6981 true cause and fix their mistake. */
6982 || (inst.operands[1].reg == REG_PC),
6983 BAD_ADDR_MODE);
6984
6985 constraint (inst.reloc.exp.X_op != O_constant
6986 || inst.reloc.exp.X_add_number != 0,
6987 _("offset must be zero in ARM encoding"));
6988
6989 inst.instruction |= inst.operands[0].reg << 12;
6990 inst.instruction |= inst.operands[1].reg << 16;
6991 inst.reloc.type = BFD_RELOC_UNUSED;
6992 }
6993
6994 static void
6995 do_ldrexd (void)
6996 {
6997 constraint (inst.operands[0].reg % 2 != 0,
6998 _("even register required"));
6999 constraint (inst.operands[1].present
7000 && inst.operands[1].reg != inst.operands[0].reg + 1,
7001 _("can only load two consecutive registers"));
7002 /* If op 1 were present and equal to PC, this function wouldn't
7003 have been called in the first place. */
7004 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7005
7006 inst.instruction |= inst.operands[0].reg << 12;
7007 inst.instruction |= inst.operands[2].reg << 16;
7008 }
7009
7010 static void
7011 do_ldst (void)
7012 {
7013 inst.instruction |= inst.operands[0].reg << 12;
7014 if (!inst.operands[1].isreg)
7015 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7016 return;
7017 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7018 }
7019
7020 static void
7021 do_ldstt (void)
7022 {
7023 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7024 reject [Rn,...]. */
7025 if (inst.operands[1].preind)
7026 {
7027 constraint (inst.reloc.exp.X_op != O_constant ||
7028 inst.reloc.exp.X_add_number != 0,
7029 _("this instruction requires a post-indexed address"));
7030
7031 inst.operands[1].preind = 0;
7032 inst.operands[1].postind = 1;
7033 inst.operands[1].writeback = 1;
7034 }
7035 inst.instruction |= inst.operands[0].reg << 12;
7036 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7037 }
7038
7039 /* Halfword and signed-byte load/store operations. */
7040
7041 static void
7042 do_ldstv4 (void)
7043 {
7044 inst.instruction |= inst.operands[0].reg << 12;
7045 if (!inst.operands[1].isreg)
7046 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7047 return;
7048 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7049 }
7050
7051 static void
7052 do_ldsttv4 (void)
7053 {
7054 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7055 reject [Rn,...]. */
7056 if (inst.operands[1].preind)
7057 {
7058 constraint (inst.reloc.exp.X_op != O_constant ||
7059 inst.reloc.exp.X_add_number != 0,
7060 _("this instruction requires a post-indexed address"));
7061
7062 inst.operands[1].preind = 0;
7063 inst.operands[1].postind = 1;
7064 inst.operands[1].writeback = 1;
7065 }
7066 inst.instruction |= inst.operands[0].reg << 12;
7067 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7068 }
7069
7070 /* Co-processor register load/store.
7071 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7072 static void
7073 do_lstc (void)
7074 {
7075 inst.instruction |= inst.operands[0].reg << 8;
7076 inst.instruction |= inst.operands[1].reg << 12;
7077 encode_arm_cp_address (2, TRUE, TRUE, 0);
7078 }
7079
7080 static void
7081 do_mlas (void)
7082 {
7083 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7084 if (inst.operands[0].reg == inst.operands[1].reg
7085 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7086 && !(inst.instruction & 0x00400000))
7087 as_tsktsk (_("Rd and Rm should be different in mla"));
7088
7089 inst.instruction |= inst.operands[0].reg << 16;
7090 inst.instruction |= inst.operands[1].reg;
7091 inst.instruction |= inst.operands[2].reg << 8;
7092 inst.instruction |= inst.operands[3].reg << 12;
7093 }
7094
7095 static void
7096 do_mov (void)
7097 {
7098 inst.instruction |= inst.operands[0].reg << 12;
7099 encode_arm_shifter_operand (1);
7100 }
7101
7102 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7103 static void
7104 do_mov16 (void)
7105 {
7106 bfd_vma imm;
7107 bfd_boolean top;
7108
7109 top = (inst.instruction & 0x00400000) != 0;
7110 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7111 _(":lower16: not allowed this instruction"));
7112 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7113 _(":upper16: not allowed instruction"));
7114 inst.instruction |= inst.operands[0].reg << 12;
7115 if (inst.reloc.type == BFD_RELOC_UNUSED)
7116 {
7117 imm = inst.reloc.exp.X_add_number;
7118 /* The value is in two pieces: 0:11, 16:19. */
7119 inst.instruction |= (imm & 0x00000fff);
7120 inst.instruction |= (imm & 0x0000f000) << 4;
7121 }
7122 }
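
/* Editorial example: "movw r0, #0xABCD" (with no :lower16:/:upper16:
   relocation) is split above into imm12 = 0xBCD in bits 11:0 and
   imm4 = 0xA in bits 19:16.  */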
7123
7124 static void do_vfp_nsyn_opcode (const char *);
7125
7126 static int
7127 do_vfp_nsyn_mrs (void)
7128 {
7129 if (inst.operands[0].isvec)
7130 {
7131 if (inst.operands[1].reg != 1)
7132 first_error (_("operand 1 must be FPSCR"));
7133 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7134 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7135 do_vfp_nsyn_opcode ("fmstat");
7136 }
7137 else if (inst.operands[1].isvec)
7138 do_vfp_nsyn_opcode ("fmrx");
7139 else
7140 return FAIL;
7141
7142 return SUCCESS;
7143 }
7144
7145 static int
7146 do_vfp_nsyn_msr (void)
7147 {
7148 if (inst.operands[0].isvec)
7149 do_vfp_nsyn_opcode ("fmxr");
7150 else
7151 return FAIL;
7152
7153 return SUCCESS;
7154 }
7155
7156 static void
7157 do_mrs (void)
7158 {
7159 if (do_vfp_nsyn_mrs () == SUCCESS)
7160 return;
7161
7162 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7163 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7164 != (PSR_c|PSR_f),
7165 _("'CPSR' or 'SPSR' expected"));
7166 inst.instruction |= inst.operands[0].reg << 12;
7167 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7168 }
7169
7170 /* Two possible forms:
7171 "{C|S}PSR_<field>, Rm",
7172 "{C|S}PSR_f, #expression". */
7173
7174 static void
7175 do_msr (void)
7176 {
7177 if (do_vfp_nsyn_msr () == SUCCESS)
7178 return;
7179
7180 inst.instruction |= inst.operands[0].imm;
7181 if (inst.operands[1].isreg)
7182 inst.instruction |= inst.operands[1].reg;
7183 else
7184 {
7185 inst.instruction |= INST_IMMEDIATE;
7186 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7187 inst.reloc.pc_rel = 0;
7188 }
7189 }
7190
7191 static void
7192 do_mul (void)
7193 {
7194 if (!inst.operands[2].present)
7195 inst.operands[2].reg = inst.operands[0].reg;
7196 inst.instruction |= inst.operands[0].reg << 16;
7197 inst.instruction |= inst.operands[1].reg;
7198 inst.instruction |= inst.operands[2].reg << 8;
7199
7200 if (inst.operands[0].reg == inst.operands[1].reg
7201 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7202 as_tsktsk (_("Rd and Rm should be different in mul"));
7203 }
7204
7205 /* Long Multiply Parser
7206 UMULL RdLo, RdHi, Rm, Rs
7207 SMULL RdLo, RdHi, Rm, Rs
7208 UMLAL RdLo, RdHi, Rm, Rs
7209 SMLAL RdLo, RdHi, Rm, Rs. */
7210
7211 static void
7212 do_mull (void)
7213 {
7214 inst.instruction |= inst.operands[0].reg << 12;
7215 inst.instruction |= inst.operands[1].reg << 16;
7216 inst.instruction |= inst.operands[2].reg;
7217 inst.instruction |= inst.operands[3].reg << 8;
7218
7219 /* rdhi, rdlo and rm must all be different. */
7220 if (inst.operands[0].reg == inst.operands[1].reg
7221 || inst.operands[0].reg == inst.operands[2].reg
7222 || inst.operands[1].reg == inst.operands[2].reg)
7223 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7224 }
7225
7226 static void
7227 do_nop (void)
7228 {
7229 if (inst.operands[0].present)
7230 {
7231 /* Architectural NOP hints are CPSR sets with no bits selected. */
7232 inst.instruction &= 0xf0000000;
7233 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7234 }
7235 }
7236
7237 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7238 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7239 Condition defaults to COND_ALWAYS.
7240 Error if Rd, Rn or Rm are R15. */
7241
7242 static void
7243 do_pkhbt (void)
7244 {
7245 inst.instruction |= inst.operands[0].reg << 12;
7246 inst.instruction |= inst.operands[1].reg << 16;
7247 inst.instruction |= inst.operands[2].reg;
7248 if (inst.operands[3].present)
7249 encode_arm_shift (3);
7250 }
7251
7252 /* ARM V6 PKHTB (Argument Parse). */
7253
7254 static void
7255 do_pkhtb (void)
7256 {
7257 if (!inst.operands[3].present)
7258 {
7259 /* If the shift specifier is omitted, turn the instruction
7260 into pkhbt rd, rm, rn. */
7261 inst.instruction &= 0xfff00010;
7262 inst.instruction |= inst.operands[0].reg << 12;
7263 inst.instruction |= inst.operands[1].reg;
7264 inst.instruction |= inst.operands[2].reg << 16;
7265 }
7266 else
7267 {
7268 inst.instruction |= inst.operands[0].reg << 12;
7269 inst.instruction |= inst.operands[1].reg << 16;
7270 inst.instruction |= inst.operands[2].reg;
7271 encode_arm_shift (3);
7272 }
7273 }
7274
7275 /* ARMv5TE: Preload-Cache
7276
7277 PLD <addr_mode>
7278
7279 Syntactically, like LDR with B=1, W=0, L=1. */
7280
7281 static void
7282 do_pld (void)
7283 {
7284 constraint (!inst.operands[0].isreg,
7285 _("'[' expected after PLD mnemonic"));
7286 constraint (inst.operands[0].postind,
7287 _("post-indexed expression used in preload instruction"));
7288 constraint (inst.operands[0].writeback,
7289 _("writeback used in preload instruction"));
7290 constraint (!inst.operands[0].preind,
7291 _("unindexed addressing used in preload instruction"));
7292 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7293 }
7294
7295 /* ARMv7: PLI <addr_mode> */
7296 static void
7297 do_pli (void)
7298 {
7299 constraint (!inst.operands[0].isreg,
7300 _("'[' expected after PLI mnemonic"));
7301 constraint (inst.operands[0].postind,
7302 _("post-indexed expression used in preload instruction"));
7303 constraint (inst.operands[0].writeback,
7304 _("writeback used in preload instruction"));
7305 constraint (!inst.operands[0].preind,
7306 _("unindexed addressing used in preload instruction"));
7307 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7308 inst.instruction &= ~PRE_INDEX;
7309 }
7310
7311 static void
7312 do_push_pop (void)
7313 {
7314 inst.operands[1] = inst.operands[0];
7315 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7316 inst.operands[0].isreg = 1;
7317 inst.operands[0].writeback = 1;
7318 inst.operands[0].reg = REG_SP;
7319 do_ldmstm ();
7320 }
7321
7322 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7323 word at the specified address and the following word
7324 respectively.
7325 Unconditionally executed.
7326 Error if Rn is R15. */
7327
7328 static void
7329 do_rfe (void)
7330 {
7331 inst.instruction |= inst.operands[0].reg << 16;
7332 if (inst.operands[0].writeback)
7333 inst.instruction |= WRITE_BACK;
7334 }
7335
7336 /* ARM V6 ssat (argument parse). */
7337
7338 static void
7339 do_ssat (void)
7340 {
7341 inst.instruction |= inst.operands[0].reg << 12;
7342 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7343 inst.instruction |= inst.operands[2].reg;
7344
7345 if (inst.operands[3].present)
7346 encode_arm_shift (3);
7347 }
7348
7349 /* ARM V6 usat (argument parse). */
7350
7351 static void
7352 do_usat (void)
7353 {
7354 inst.instruction |= inst.operands[0].reg << 12;
7355 inst.instruction |= inst.operands[1].imm << 16;
7356 inst.instruction |= inst.operands[2].reg;
7357
7358 if (inst.operands[3].present)
7359 encode_arm_shift (3);
7360 }
7361
7362 /* ARM V6 ssat16 (argument parse). */
7363
7364 static void
7365 do_ssat16 (void)
7366 {
7367 inst.instruction |= inst.operands[0].reg << 12;
7368 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7369 inst.instruction |= inst.operands[2].reg;
7370 }
7371
7372 static void
7373 do_usat16 (void)
7374 {
7375 inst.instruction |= inst.operands[0].reg << 12;
7376 inst.instruction |= inst.operands[1].imm << 16;
7377 inst.instruction |= inst.operands[2].reg;
7378 }
7379
7380 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7381 preserving the other bits.
7382
7383 setend <endian_specifier>, where <endian_specifier> is either
7384 BE or LE. */
7385
7386 static void
7387 do_setend (void)
7388 {
7389 if (inst.operands[0].imm)
7390 inst.instruction |= 0x200;
7391 }
7392
7393 static void
7394 do_shift (void)
7395 {
7396 unsigned int Rm = (inst.operands[1].present
7397 ? inst.operands[1].reg
7398 : inst.operands[0].reg);
7399
7400 inst.instruction |= inst.operands[0].reg << 12;
7401 inst.instruction |= Rm;
7402 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7403 {
7404 inst.instruction |= inst.operands[2].reg << 8;
7405 inst.instruction |= SHIFT_BY_REG;
7406 }
7407 else
7408 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7409 }
7410
7411 static void
7412 do_smc (void)
7413 {
7414 inst.reloc.type = BFD_RELOC_ARM_SMC;
7415 inst.reloc.pc_rel = 0;
7416 }
7417
7418 static void
7419 do_swi (void)
7420 {
7421 inst.reloc.type = BFD_RELOC_ARM_SWI;
7422 inst.reloc.pc_rel = 0;
7423 }
7424
7425 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7426 SMLAxy{cond} Rd,Rm,Rs,Rn
7427 SMLAWy{cond} Rd,Rm,Rs,Rn
7428 Error if any register is R15. */
7429
7430 static void
7431 do_smla (void)
7432 {
7433 inst.instruction |= inst.operands[0].reg << 16;
7434 inst.instruction |= inst.operands[1].reg;
7435 inst.instruction |= inst.operands[2].reg << 8;
7436 inst.instruction |= inst.operands[3].reg << 12;
7437 }
7438
7439 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7440 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7441 Error if any register is R15.
7442 Warning if Rdlo == Rdhi. */
7443
7444 static void
7445 do_smlal (void)
7446 {
7447 inst.instruction |= inst.operands[0].reg << 12;
7448 inst.instruction |= inst.operands[1].reg << 16;
7449 inst.instruction |= inst.operands[2].reg;
7450 inst.instruction |= inst.operands[3].reg << 8;
7451
7452 if (inst.operands[0].reg == inst.operands[1].reg)
7453 as_tsktsk (_("rdhi and rdlo must be different"));
7454 }
7455
7456 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7457 SMULxy{cond} Rd,Rm,Rs
7458 Error if any register is R15. */
7459
7460 static void
7461 do_smul (void)
7462 {
7463 inst.instruction |= inst.operands[0].reg << 16;
7464 inst.instruction |= inst.operands[1].reg;
7465 inst.instruction |= inst.operands[2].reg << 8;
7466 }
7467
7468 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7469 the same for both ARM and Thumb-2. */
7470
7471 static void
7472 do_srs (void)
7473 {
7474 int reg;
7475
7476 if (inst.operands[0].present)
7477 {
7478 reg = inst.operands[0].reg;
7479 constraint (reg != 13, _("SRS base register must be r13"));
7480 }
7481 else
7482 reg = 13;
7483
7484 inst.instruction |= reg << 16;
7485 inst.instruction |= inst.operands[1].imm;
7486 if (inst.operands[0].writeback || inst.operands[1].writeback)
7487 inst.instruction |= WRITE_BACK;
7488 }
7489
7490 /* ARM V6 strex (argument parse). */
7491
7492 static void
7493 do_strex (void)
7494 {
7495 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7496 || inst.operands[2].postind || inst.operands[2].writeback
7497 || inst.operands[2].immisreg || inst.operands[2].shifted
7498 || inst.operands[2].negative
7499 /* See comment in do_ldrex(). */
7500 || (inst.operands[2].reg == REG_PC),
7501 BAD_ADDR_MODE);
7502
7503 constraint (inst.operands[0].reg == inst.operands[1].reg
7504 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7505
7506 constraint (inst.reloc.exp.X_op != O_constant
7507 || inst.reloc.exp.X_add_number != 0,
7508 _("offset must be zero in ARM encoding"));
7509
7510 inst.instruction |= inst.operands[0].reg << 12;
7511 inst.instruction |= inst.operands[1].reg;
7512 inst.instruction |= inst.operands[2].reg << 16;
7513 inst.reloc.type = BFD_RELOC_UNUSED;
7514 }
7515
7516 static void
7517 do_strexd (void)
7518 {
7519 constraint (inst.operands[1].reg % 2 != 0,
7520 _("even register required"));
7521 constraint (inst.operands[2].present
7522 && inst.operands[2].reg != inst.operands[1].reg + 1,
7523 _("can only store two consecutive registers"));
7524 /* If op 2 were present and equal to PC, this function wouldn't
7525 have been called in the first place. */
7526 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7527
7528 constraint (inst.operands[0].reg == inst.operands[1].reg
7529 || inst.operands[0].reg == inst.operands[1].reg + 1
7530 || inst.operands[0].reg == inst.operands[3].reg,
7531 BAD_OVERLAP);
7532
7533 inst.instruction |= inst.operands[0].reg << 12;
7534 inst.instruction |= inst.operands[1].reg;
7535 inst.instruction |= inst.operands[3].reg << 16;
7536 }
7537
7538 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7539 extends it to 32-bits, and adds the result to a value in another
7540 register. You can specify a rotation by 0, 8, 16, or 24 bits
7541 before extracting the 16-bit value.
7542 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7543 Condition defaults to COND_ALWAYS.
7544 Error if any register uses R15. */
7545
7546 static void
7547 do_sxtah (void)
7548 {
7549 inst.instruction |= inst.operands[0].reg << 12;
7550 inst.instruction |= inst.operands[1].reg << 16;
7551 inst.instruction |= inst.operands[2].reg;
7552 inst.instruction |= inst.operands[3].imm << 10;
7553 }
7554
7555 /* ARM V6 SXTH.
7556
7557 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7558 Condition defaults to COND_ALWAYS.
7559 Error if any register uses R15. */
7560
7561 static void
7562 do_sxth (void)
7563 {
7564 inst.instruction |= inst.operands[0].reg << 12;
7565 inst.instruction |= inst.operands[1].reg;
7566 inst.instruction |= inst.operands[2].imm << 10;
7567 }
7568 \f
7569 /* VFP instructions. In a logical order: SP variant first, monad
7570 before dyad, arithmetic then move then load/store. */
7571
7572 static void
7573 do_vfp_sp_monadic (void)
7574 {
7575 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7576 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7577 }
7578
7579 static void
7580 do_vfp_sp_dyadic (void)
7581 {
7582 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7583 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7584 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7585 }
7586
7587 static void
7588 do_vfp_sp_compare_z (void)
7589 {
7590 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7591 }
7592
7593 static void
7594 do_vfp_dp_sp_cvt (void)
7595 {
7596 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7597 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7598 }
7599
7600 static void
7601 do_vfp_sp_dp_cvt (void)
7602 {
7603 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7604 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7605 }
7606
7607 static void
7608 do_vfp_reg_from_sp (void)
7609 {
7610 inst.instruction |= inst.operands[0].reg << 12;
7611 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7612 }
7613
7614 static void
7615 do_vfp_reg2_from_sp2 (void)
7616 {
7617 constraint (inst.operands[2].imm != 2,
7618 _("only two consecutive VFP SP registers allowed here"));
7619 inst.instruction |= inst.operands[0].reg << 12;
7620 inst.instruction |= inst.operands[1].reg << 16;
7621 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7622 }
7623
7624 static void
7625 do_vfp_sp_from_reg (void)
7626 {
7627 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7628 inst.instruction |= inst.operands[1].reg << 12;
7629 }
7630
7631 static void
7632 do_vfp_sp2_from_reg2 (void)
7633 {
7634 constraint (inst.operands[0].imm != 2,
7635 _("only two consecutive VFP SP registers allowed here"));
7636 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7637 inst.instruction |= inst.operands[1].reg << 12;
7638 inst.instruction |= inst.operands[2].reg << 16;
7639 }
7640
7641 static void
7642 do_vfp_sp_ldst (void)
7643 {
7644 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7645 encode_arm_cp_address (1, FALSE, TRUE, 0);
7646 }
7647
7648 static void
7649 do_vfp_dp_ldst (void)
7650 {
7651 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7652 encode_arm_cp_address (1, FALSE, TRUE, 0);
7653 }
7654
7655
7656 static void
7657 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7658 {
7659 if (inst.operands[0].writeback)
7660 inst.instruction |= WRITE_BACK;
7661 else
7662 constraint (ldstm_type != VFP_LDSTMIA,
7663 _("this addressing mode requires base-register writeback"));
7664 inst.instruction |= inst.operands[0].reg << 16;
7665 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7666 inst.instruction |= inst.operands[1].imm;
7667 }
7668
7669 static void
7670 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7671 {
7672 int count;
7673
7674 if (inst.operands[0].writeback)
7675 inst.instruction |= WRITE_BACK;
7676 else
7677 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7678 _("this addressing mode requires base-register writeback"));
7679
7680 inst.instruction |= inst.operands[0].reg << 16;
7681 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7682
7683 count = inst.operands[1].imm << 1;
7684 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7685 count += 1;
7686
7687 inst.instruction |= count;
7688 }
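
/* Editorial example: "fldmiax r2!, {d3-d5}" transfers three D
   registers, so the count field above becomes 3 * 2 + 1 = 7; the odd
   count marks the FLDMX/FSTMX format, while the plain IA/DB forms use
   exactly two words per register.  */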
7689
7690 static void
7691 do_vfp_sp_ldstmia (void)
7692 {
7693 vfp_sp_ldstm (VFP_LDSTMIA);
7694 }
7695
7696 static void
7697 do_vfp_sp_ldstmdb (void)
7698 {
7699 vfp_sp_ldstm (VFP_LDSTMDB);
7700 }
7701
7702 static void
7703 do_vfp_dp_ldstmia (void)
7704 {
7705 vfp_dp_ldstm (VFP_LDSTMIA);
7706 }
7707
7708 static void
7709 do_vfp_dp_ldstmdb (void)
7710 {
7711 vfp_dp_ldstm (VFP_LDSTMDB);
7712 }
7713
7714 static void
7715 do_vfp_xp_ldstmia (void)
7716 {
7717 vfp_dp_ldstm (VFP_LDSTMIAX);
7718 }
7719
7720 static void
7721 do_vfp_xp_ldstmdb (void)
7722 {
7723 vfp_dp_ldstm (VFP_LDSTMDBX);
7724 }
7725
7726 static void
7727 do_vfp_dp_rd_rm (void)
7728 {
7729 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7730 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7731 }
7732
7733 static void
7734 do_vfp_dp_rn_rd (void)
7735 {
7736 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7737 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7738 }
7739
7740 static void
7741 do_vfp_dp_rd_rn (void)
7742 {
7743 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7744 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7745 }
7746
7747 static void
7748 do_vfp_dp_rd_rn_rm (void)
7749 {
7750 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7751 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7752 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7753 }
7754
7755 static void
7756 do_vfp_dp_rd (void)
7757 {
7758 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7759 }
7760
7761 static void
7762 do_vfp_dp_rm_rd_rn (void)
7763 {
7764 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7765 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7766 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7767 }
7768
7769 /* VFPv3 instructions. */
7770 static void
7771 do_vfp_sp_const (void)
7772 {
7773 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7774 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7775 inst.instruction |= (inst.operands[1].imm & 0x0f);
7776 }
7777
7778 static void
7779 do_vfp_dp_const (void)
7780 {
7781 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7782 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7783 inst.instruction |= (inst.operands[1].imm & 0x0f);
7784 }
7785
7786 static void
7787 vfp_conv (int srcsize)
7788 {
7789 unsigned immbits = srcsize - inst.operands[1].imm;
7790 inst.instruction |= (immbits & 1) << 5;
7791 inst.instruction |= (immbits >> 1);
7792 }
7793
7794 static void
7795 do_vfp_sp_conv_16 (void)
7796 {
7797 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7798 vfp_conv (16);
7799 }
7800
7801 static void
7802 do_vfp_dp_conv_16 (void)
7803 {
7804 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7805 vfp_conv (16);
7806 }
7807
7808 static void
7809 do_vfp_sp_conv_32 (void)
7810 {
7811 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7812 vfp_conv (32);
7813 }
7814
7815 static void
7816 do_vfp_dp_conv_32 (void)
7817 {
7818 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7819 vfp_conv (32);
7820 }
7821
7822 \f
7823 /* FPA instructions. Also in a logical order. */
7824
7825 static void
7826 do_fpa_cmp (void)
7827 {
7828 inst.instruction |= inst.operands[0].reg << 16;
7829 inst.instruction |= inst.operands[1].reg;
7830 }
7831
7832 static void
7833 do_fpa_ldmstm (void)
7834 {
7835 inst.instruction |= inst.operands[0].reg << 12;
7836 switch (inst.operands[1].imm)
7837 {
7838 case 1: inst.instruction |= CP_T_X; break;
7839 case 2: inst.instruction |= CP_T_Y; break;
7840 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7841 case 4: break;
7842 default: abort ();
7843 }
7844
7845 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7846 {
7847 /* The instruction specified "ea" or "fd", so we can only accept
7848 [Rn]{!}. The instruction does not really support stacking or
7849 unstacking, so we have to emulate these by setting appropriate
7850 bits and offsets. */
7851 constraint (inst.reloc.exp.X_op != O_constant
7852 || inst.reloc.exp.X_add_number != 0,
7853 _("this instruction does not support indexing"));
7854
7855 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7856 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7857
7858 if (!(inst.instruction & INDEX_UP))
7859 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7860
7861 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7862 {
7863 inst.operands[2].preind = 0;
7864 inst.operands[2].postind = 1;
7865 }
7866 }
7867
7868 encode_arm_cp_address (2, TRUE, TRUE, 0);
7869 }
7870
7871 \f
7872 /* iWMMXt instructions: strictly in alphabetical order. */
7873
7874 static void
7875 do_iwmmxt_tandorc (void)
7876 {
7877 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7878 }
7879
7880 static void
7881 do_iwmmxt_textrc (void)
7882 {
7883 inst.instruction |= inst.operands[0].reg << 12;
7884 inst.instruction |= inst.operands[1].imm;
7885 }
7886
7887 static void
7888 do_iwmmxt_textrm (void)
7889 {
7890 inst.instruction |= inst.operands[0].reg << 12;
7891 inst.instruction |= inst.operands[1].reg << 16;
7892 inst.instruction |= inst.operands[2].imm;
7893 }
7894
7895 static void
7896 do_iwmmxt_tinsr (void)
7897 {
7898 inst.instruction |= inst.operands[0].reg << 16;
7899 inst.instruction |= inst.operands[1].reg << 12;
7900 inst.instruction |= inst.operands[2].imm;
7901 }
7902
7903 static void
7904 do_iwmmxt_tmia (void)
7905 {
7906 inst.instruction |= inst.operands[0].reg << 5;
7907 inst.instruction |= inst.operands[1].reg;
7908 inst.instruction |= inst.operands[2].reg << 12;
7909 }
7910
7911 static void
7912 do_iwmmxt_waligni (void)
7913 {
7914 inst.instruction |= inst.operands[0].reg << 12;
7915 inst.instruction |= inst.operands[1].reg << 16;
7916 inst.instruction |= inst.operands[2].reg;
7917 inst.instruction |= inst.operands[3].imm << 20;
7918 }
7919
7920 static void
7921 do_iwmmxt_wmerge (void)
7922 {
7923 inst.instruction |= inst.operands[0].reg << 12;
7924 inst.instruction |= inst.operands[1].reg << 16;
7925 inst.instruction |= inst.operands[2].reg;
7926 inst.instruction |= inst.operands[3].imm << 21;
7927 }
7928
7929 static void
7930 do_iwmmxt_wmov (void)
7931 {
7932 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7933 inst.instruction |= inst.operands[0].reg << 12;
7934 inst.instruction |= inst.operands[1].reg << 16;
7935 inst.instruction |= inst.operands[1].reg;
7936 }
7937
7938 static void
7939 do_iwmmxt_wldstbh (void)
7940 {
7941 int reloc;
7942 inst.instruction |= inst.operands[0].reg << 12;
7943 if (thumb_mode)
7944 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7945 else
7946 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7947 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7948 }
7949
7950 static void
7951 do_iwmmxt_wldstw (void)
7952 {
7953 /* RIWR_RIWC clears .isreg for a control register. */
7954 if (!inst.operands[0].isreg)
7955 {
7956 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7957 inst.instruction |= 0xf0000000;
7958 }
7959
7960 inst.instruction |= inst.operands[0].reg << 12;
7961 encode_arm_cp_address (1, TRUE, TRUE, 0);
7962 }
7963
7964 static void
7965 do_iwmmxt_wldstd (void)
7966 {
7967 inst.instruction |= inst.operands[0].reg << 12;
7968 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7969 && inst.operands[1].immisreg)
7970 {
7971 inst.instruction &= ~0x1a000ff;
7972 inst.instruction |= (0xf << 28);
7973 if (inst.operands[1].preind)
7974 inst.instruction |= PRE_INDEX;
7975 if (!inst.operands[1].negative)
7976 inst.instruction |= INDEX_UP;
7977 if (inst.operands[1].writeback)
7978 inst.instruction |= WRITE_BACK;
7979 inst.instruction |= inst.operands[1].reg << 16;
7980 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7981 inst.instruction |= inst.operands[1].imm;
7982 }
7983 else
7984 encode_arm_cp_address (1, TRUE, FALSE, 0);
7985 }
7986
7987 static void
7988 do_iwmmxt_wshufh (void)
7989 {
7990 inst.instruction |= inst.operands[0].reg << 12;
7991 inst.instruction |= inst.operands[1].reg << 16;
7992 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7993 inst.instruction |= (inst.operands[2].imm & 0x0f);
7994 }
7995
7996 static void
7997 do_iwmmxt_wzero (void)
7998 {
7999 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8000 inst.instruction |= inst.operands[0].reg;
8001 inst.instruction |= inst.operands[0].reg << 12;
8002 inst.instruction |= inst.operands[0].reg << 16;
8003 }
8004
8005 static void
8006 do_iwmmxt_wrwrwr_or_imm5 (void)
8007 {
8008 if (inst.operands[2].isreg)
8009 do_rd_rn_rm ();
8010 else {
8011 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8012 _("immediate operand requires iWMMXt2"));
8013 do_rd_rn ();
8014 if (inst.operands[2].imm == 0)
8015 {
8016 switch ((inst.instruction >> 20) & 0xf)
8017 {
8018 case 4:
8019 case 5:
8020 case 6:
8021 case 7:
8022 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8023 inst.operands[2].imm = 16;
8024 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8025 break;
8026 case 8:
8027 case 9:
8028 case 10:
8029 case 11:
8030 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8031 inst.operands[2].imm = 32;
8032 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8033 break;
8034 case 12:
8035 case 13:
8036 case 14:
8037 case 15:
8038 {
8039 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8040 unsigned long wrn;
8041 wrn = (inst.instruction >> 16) & 0xf;
8042 inst.instruction &= 0xff0fff0f;
8043 inst.instruction |= wrn;
8044 /* Bail out here; the instruction is now assembled. */
8045 return;
8046 }
8047 }
8048 }
8049 /* Map 32 -> 0, etc. */
8050 inst.operands[2].imm &= 0x1f;
8051 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8052 }
8053 }
8054 \f
8055 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8056 operations first, then control, shift, and load/store. */
8057
8058 /* Insns like "foo X,Y,Z". */
8059
8060 static void
8061 do_mav_triple (void)
8062 {
8063 inst.instruction |= inst.operands[0].reg << 16;
8064 inst.instruction |= inst.operands[1].reg;
8065 inst.instruction |= inst.operands[2].reg << 12;
8066 }
8067
8068 /* Insns like "foo W,X,Y,Z".
8069 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8070
8071 static void
8072 do_mav_quad (void)
8073 {
8074 inst.instruction |= inst.operands[0].reg << 5;
8075 inst.instruction |= inst.operands[1].reg << 12;
8076 inst.instruction |= inst.operands[2].reg << 16;
8077 inst.instruction |= inst.operands[3].reg;
8078 }
8079
8080 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8081 static void
8082 do_mav_dspsc (void)
8083 {
8084 inst.instruction |= inst.operands[1].reg << 12;
8085 }
8086
8087 /* Maverick shift immediate instructions.
8088 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8089 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8090
8091 static void
8092 do_mav_shift (void)
8093 {
8094 int imm = inst.operands[2].imm;
8095
8096 inst.instruction |= inst.operands[0].reg << 12;
8097 inst.instruction |= inst.operands[1].reg << 16;
8098
8099 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8100 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8101 Bit 4 should be 0. */
8102 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8103
8104 inst.instruction |= imm;
8105 }
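
/* Editorial example: a shift immediate of 0x33 is repacked above as
   (0x33 & 0xf) | ((0x33 & 0x70) << 1) = 0x63, i.e. bits 0-3 and 5-7
   of the instruction with bit 4 left clear.  */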
8106 \f
8107 /* XScale instructions. Also sorted arithmetic before move. */
8108
8109 /* Xscale multiply-accumulate (argument parse)
8110 MIAcc acc0,Rm,Rs
8111 MIAPHcc acc0,Rm,Rs
8112 MIAxycc acc0,Rm,Rs. */
8113
8114 static void
8115 do_xsc_mia (void)
8116 {
8117 inst.instruction |= inst.operands[1].reg;
8118 inst.instruction |= inst.operands[2].reg << 12;
8119 }
8120
8121 /* Xscale move-accumulator-register (argument parse)
8122
8123 MARcc acc0,RdLo,RdHi. */
8124
8125 static void
8126 do_xsc_mar (void)
8127 {
8128 inst.instruction |= inst.operands[1].reg << 12;
8129 inst.instruction |= inst.operands[2].reg << 16;
8130 }
8131
8132 /* Xscale move-register-accumulator (argument parse)
8133
8134 MRAcc RdLo,RdHi,acc0. */
8135
8136 static void
8137 do_xsc_mra (void)
8138 {
8139 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8140 inst.instruction |= inst.operands[0].reg << 12;
8141 inst.instruction |= inst.operands[1].reg << 16;
8142 }
8143 \f
8144 /* Encoding functions relevant only to Thumb. */
8145
8146 /* inst.operands[i] is a shifted-register operand; encode
8147 it into inst.instruction in the format used by Thumb32. */
8148
8149 static void
8150 encode_thumb32_shifted_operand (int i)
8151 {
8152 unsigned int value = inst.reloc.exp.X_add_number;
8153 unsigned int shift = inst.operands[i].shift_kind;
8154
8155 constraint (inst.operands[i].immisreg,
8156 _("shift by register not allowed in thumb mode"));
8157 inst.instruction |= inst.operands[i].reg;
8158 if (shift == SHIFT_RRX)
8159 inst.instruction |= SHIFT_ROR << 4;
8160 else
8161 {
8162 constraint (inst.reloc.exp.X_op != O_constant,
8163 _("expression too complex"));
8164
8165 constraint (value > 32
8166 || (value == 32 && (shift == SHIFT_LSL
8167 || shift == SHIFT_ROR)),
8168 _("shift expression is too large"));
8169
8170 if (value == 0)
8171 shift = SHIFT_LSL;
8172 else if (value == 32)
8173 value = 0;
8174
8175 inst.instruction |= shift << 4;
8176 inst.instruction |= (value & 0x1c) << 10;
8177 inst.instruction |= (value & 0x03) << 6;
8178 }
8179 }
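
/* Editorial example: an operand such as "r3, lsl #5" puts r3 in bits
   3:0 and LSL in bits 5:4, and splits the amount 5 into imm3 = 1
   (bits 14:12) and imm2 = 1 (bits 7:6); "rrx" is encoded above as ROR
   with a zero amount, and "asr #32" likewise encodes with amount 0.  */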
8180
8181
8182 /* inst.operands[i] was set up by parse_address. Encode it into a
8183 Thumb32 format load or store instruction. Reject forms that cannot
8184 be used with such instructions. If is_t is true, reject forms that
8185 cannot be used with a T instruction; if is_d is true, reject forms
8186 that cannot be used with a D instruction. */
8187
8188 static void
8189 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8190 {
8191 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8192
8193 constraint (!inst.operands[i].isreg,
8194 _("Instruction does not support =N addresses"));
8195
8196 inst.instruction |= inst.operands[i].reg << 16;
8197 if (inst.operands[i].immisreg)
8198 {
8199 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8200 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8201 constraint (inst.operands[i].negative,
8202 _("Thumb does not support negative register indexing"));
8203 constraint (inst.operands[i].postind,
8204 _("Thumb does not support register post-indexing"));
8205 constraint (inst.operands[i].writeback,
8206 _("Thumb does not support register indexing with writeback"));
8207 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8208 _("Thumb supports only LSL in shifted register indexing"));
8209
8210 inst.instruction |= inst.operands[i].imm;
8211 if (inst.operands[i].shifted)
8212 {
8213 constraint (inst.reloc.exp.X_op != O_constant,
8214 _("expression too complex"));
8215 constraint (inst.reloc.exp.X_add_number < 0
8216 || inst.reloc.exp.X_add_number > 3,
8217 _("shift out of range"));
8218 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8219 }
8220 inst.reloc.type = BFD_RELOC_UNUSED;
8221 }
8222 else if (inst.operands[i].preind)
8223 {
8224 constraint (is_pc && inst.operands[i].writeback,
8225 _("cannot use writeback with PC-relative addressing"));
8226 constraint (is_t && inst.operands[i].writeback,
8227 _("cannot use writeback with this instruction"));
8228
8229 if (is_d)
8230 {
8231 inst.instruction |= 0x01000000;
8232 if (inst.operands[i].writeback)
8233 inst.instruction |= 0x00200000;
8234 }
8235 else
8236 {
8237 inst.instruction |= 0x00000c00;
8238 if (inst.operands[i].writeback)
8239 inst.instruction |= 0x00000100;
8240 }
8241 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8242 }
8243 else if (inst.operands[i].postind)
8244 {
8245 assert (inst.operands[i].writeback);
8246 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8247 constraint (is_t, _("cannot use post-indexing with this instruction"));
8248
8249 if (is_d)
8250 inst.instruction |= 0x00200000;
8251 else
8252 inst.instruction |= 0x00000900;
8253 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8254 }
8255 else /* unindexed - only for coprocessor */
8256 inst.error = _("instruction does not accept unindexed addressing");
8257 }
8258
8259 /* Table of Thumb instructions which exist in both 16- and 32-bit
8260 encodings (the latter only in V6T2 and later cores). The index is the
8261 value used in the insns table below. When there is more than one
8262 possible 16-bit encoding for the instruction, this table always
8263 holds variant (1).
8264 Also contains several pseudo-instructions used during relaxation. */
8265 #define T16_32_TAB \
8266 X(adc, 4140, eb400000), \
8267 X(adcs, 4140, eb500000), \
8268 X(add, 1c00, eb000000), \
8269 X(adds, 1c00, eb100000), \
8270 X(addi, 0000, f1000000), \
8271 X(addis, 0000, f1100000), \
8272 X(add_pc,000f, f20f0000), \
8273 X(add_sp,000d, f10d0000), \
8274 X(adr, 000f, f20f0000), \
8275 X(and, 4000, ea000000), \
8276 X(ands, 4000, ea100000), \
8277 X(asr, 1000, fa40f000), \
8278 X(asrs, 1000, fa50f000), \
8279 X(b, e000, f000b000), \
8280 X(bcond, d000, f0008000), \
8281 X(bic, 4380, ea200000), \
8282 X(bics, 4380, ea300000), \
8283 X(cmn, 42c0, eb100f00), \
8284 X(cmp, 2800, ebb00f00), \
8285 X(cpsie, b660, f3af8400), \
8286 X(cpsid, b670, f3af8600), \
8287 X(cpy, 4600, ea4f0000), \
8288 X(dec_sp,80dd, f1ad0d00), \
8289 X(eor, 4040, ea800000), \
8290 X(eors, 4040, ea900000), \
8291 X(inc_sp,00dd, f10d0d00), \
8292 X(ldmia, c800, e8900000), \
8293 X(ldr, 6800, f8500000), \
8294 X(ldrb, 7800, f8100000), \
8295 X(ldrh, 8800, f8300000), \
8296 X(ldrsb, 5600, f9100000), \
8297 X(ldrsh, 5e00, f9300000), \
8298 X(ldr_pc,4800, f85f0000), \
8299 X(ldr_pc2,4800, f85f0000), \
8300 X(ldr_sp,9800, f85d0000), \
8301 X(lsl, 0000, fa00f000), \
8302 X(lsls, 0000, fa10f000), \
8303 X(lsr, 0800, fa20f000), \
8304 X(lsrs, 0800, fa30f000), \
8305 X(mov, 2000, ea4f0000), \
8306 X(movs, 2000, ea5f0000), \
8307 X(mul, 4340, fb00f000), \
8308 X(muls, 4340, ffffffff), /* no 32b muls */ \
8309 X(mvn, 43c0, ea6f0000), \
8310 X(mvns, 43c0, ea7f0000), \
8311 X(neg, 4240, f1c00000), /* rsb #0 */ \
8312 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8313 X(orr, 4300, ea400000), \
8314 X(orrs, 4300, ea500000), \
8315 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8316 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8317 X(rev, ba00, fa90f080), \
8318 X(rev16, ba40, fa90f090), \
8319 X(revsh, bac0, fa90f0b0), \
8320 X(ror, 41c0, fa60f000), \
8321 X(rors, 41c0, fa70f000), \
8322 X(sbc, 4180, eb600000), \
8323 X(sbcs, 4180, eb700000), \
8324 X(stmia, c000, e8800000), \
8325 X(str, 6000, f8400000), \
8326 X(strb, 7000, f8000000), \
8327 X(strh, 8000, f8200000), \
8328 X(str_sp,9000, f84d0000), \
8329 X(sub, 1e00, eba00000), \
8330 X(subs, 1e00, ebb00000), \
8331 X(subi, 8000, f1a00000), \
8332 X(subis, 8000, f1b00000), \
8333 X(sxtb, b240, fa4ff080), \
8334 X(sxth, b200, fa0ff080), \
8335 X(tst, 4200, ea100f00), \
8336 X(uxtb, b2c0, fa5ff080), \
8337 X(uxth, b280, fa1ff080), \
8338 X(nop, bf00, f3af8000), \
8339 X(yield, bf10, f3af8001), \
8340 X(wfe, bf20, f3af8002), \
8341 X(wfi, bf30, f3af8003), \
8342 X(sev, bf40, f3af8004),
8343
8344 /* To catch errors in encoding functions, the codes are all offset by
8345 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8346 as 16-bit instructions. */
8347 #define X(a,b,c) T_MNEM_##a
8348 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8349 #undef X
8350
8351 #define X(a,b,c) 0x##b
8352 static const unsigned short thumb_op16[] = { T16_32_TAB };
8353 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8354 #undef X
8355
8356 #define X(a,b,c) 0x##c
8357 static const unsigned int thumb_op32[] = { T16_32_TAB };
8358 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8359 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8360 #undef X
8361 #undef T16_32_TAB
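/* Worked example of the lookup: T_MNEM_adc is the first enumerator
   after T16_32_OFFSET, i.e. 0xF800, so THUMB_OP16 (T_MNEM_adc) is
   thumb_op16[0] == 0x4140 and THUMB_OP32 (T_MNEM_adc) is 0xeb400000;
   THUMB_SETS_FLAGS (T_MNEM_adcs) tests bit 20 of the 32-bit encoding
   (the S bit).  */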
8362
8363 /* Thumb instruction encoders, in alphabetical order. */
8364
8365 /* ADDW or SUBW. */
8366 static void
8367 do_t_add_sub_w (void)
8368 {
8369 int Rd, Rn;
8370
8371 Rd = inst.operands[0].reg;
8372 Rn = inst.operands[1].reg;
8373
8374 constraint (Rd == 15, _("PC not allowed as destination"));
8375 inst.instruction |= (Rn << 16) | (Rd << 8);
8376 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8377 }
8378
8379 /* Parse an add or subtract instruction. We get here with inst.instruction
8380 equalling any of T_MNEM_add, adds, sub, or subs. */
8381
8382 static void
8383 do_t_add_sub (void)
8384 {
8385 int Rd, Rs, Rn;
8386
8387 Rd = inst.operands[0].reg;
8388 Rs = (inst.operands[1].present
8389 ? inst.operands[1].reg /* Rd, Rs, foo */
8390 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8391
8392 if (unified_syntax)
8393 {
8394 bfd_boolean flags;
8395 bfd_boolean narrow;
8396 int opcode;
8397
8398 flags = (inst.instruction == T_MNEM_adds
8399 || inst.instruction == T_MNEM_subs);
8400 if (flags)
8401 narrow = (current_it_mask == 0);
8402 else
8403 narrow = (current_it_mask != 0);
8404 if (!inst.operands[2].isreg)
8405 {
8406 int add;
8407
8408 add = (inst.instruction == T_MNEM_add
8409 || inst.instruction == T_MNEM_adds);
8410 opcode = 0;
8411 if (inst.size_req != 4)
8412 {
8413 /* Attempt to use a narrow opcode, with relaxation if
8414 appropriate. */
8415 if (Rd == REG_SP && Rs == REG_SP && !flags)
8416 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8417 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8418 opcode = T_MNEM_add_sp;
8419 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8420 opcode = T_MNEM_add_pc;
8421 else if (Rd <= 7 && Rs <= 7 && narrow)
8422 {
8423 if (flags)
8424 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8425 else
8426 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8427 }
8428 if (opcode)
8429 {
8430 inst.instruction = THUMB_OP16 (opcode);
8431 inst.instruction |= (Rd << 4) | Rs;
8432 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8433 if (inst.size_req != 2)
8434 inst.relax = opcode;
8435 }
8436 else
8437 constraint (inst.size_req == 2, BAD_HIREG);
8438 }
8439 if (inst.size_req == 4
8440 || (inst.size_req != 2 && !opcode))
8441 {
8442 if (Rs == REG_PC)
8443 {
8444 /* Always use addw/subw. */
8445 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8446 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8447 }
8448 else
8449 {
8450 inst.instruction = THUMB_OP32 (inst.instruction);
8451 inst.instruction = (inst.instruction & 0xe1ffffff)
8452 | 0x10000000;
8453 if (flags)
8454 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8455 else
8456 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8457 }
8458 inst.instruction |= Rd << 8;
8459 inst.instruction |= Rs << 16;
8460 }
8461 }
8462 else
8463 {
8464 Rn = inst.operands[2].reg;
8465 /* See if we can do this with a 16-bit instruction. */
8466 if (!inst.operands[2].shifted && inst.size_req != 4)
8467 {
8468 if (Rd > 7 || Rs > 7 || Rn > 7)
8469 narrow = FALSE;
8470
8471 if (narrow)
8472 {
8473 inst.instruction = ((inst.instruction == T_MNEM_adds
8474 || inst.instruction == T_MNEM_add)
8475 ? T_OPCODE_ADD_R3
8476 : T_OPCODE_SUB_R3);
8477 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8478 return;
8479 }
8480
8481 if (inst.instruction == T_MNEM_add)
8482 {
8483 if (Rd == Rs)
8484 {
8485 inst.instruction = T_OPCODE_ADD_HI;
8486 inst.instruction |= (Rd & 8) << 4;
8487 inst.instruction |= (Rd & 7);
8488 inst.instruction |= Rn << 3;
8489 return;
8490 }
8491 /* ... because addition is commutative! */
8492 else if (Rd == Rn)
8493 {
8494 inst.instruction = T_OPCODE_ADD_HI;
8495 inst.instruction |= (Rd & 8) << 4;
8496 inst.instruction |= (Rd & 7);
8497 inst.instruction |= Rs << 3;
8498 return;
8499 }
8500 }
8501 }
8502 /* If we get here, it can't be done in 16 bits. */
8503 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8504 _("shift must be constant"));
8505 inst.instruction = THUMB_OP32 (inst.instruction);
8506 inst.instruction |= Rd << 8;
8507 inst.instruction |= Rs << 16;
8508 encode_thumb32_shifted_operand (2);
8509 }
8510 }
8511 else
8512 {
8513 constraint (inst.instruction == T_MNEM_adds
8514 || inst.instruction == T_MNEM_subs,
8515 BAD_THUMB32);
8516
8517 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8518 {
8519 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8520 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8521 BAD_HIREG);
8522
8523 inst.instruction = (inst.instruction == T_MNEM_add
8524 ? 0x0000 : 0x8000);
8525 inst.instruction |= (Rd << 4) | Rs;
8526 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8527 return;
8528 }
8529
8530 Rn = inst.operands[2].reg;
8531 constraint (inst.operands[2].shifted, _("unshifted register required"));
8532
8533 /* We now have Rd, Rs, and Rn set to registers. */
8534 if (Rd > 7 || Rs > 7 || Rn > 7)
8535 {
8536 /* Can't do this for SUB. */
8537 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8538 inst.instruction = T_OPCODE_ADD_HI;
8539 inst.instruction |= (Rd & 8) << 4;
8540 inst.instruction |= (Rd & 7);
8541 if (Rs == Rd)
8542 inst.instruction |= Rn << 3;
8543 else if (Rn == Rd)
8544 inst.instruction |= Rs << 3;
8545 else
8546 constraint (1, _("dest must overlap one source register"));
8547 }
8548 else
8549 {
8550 inst.instruction = (inst.instruction == T_MNEM_add
8551 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8552 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8553 }
8554 }
8555 }
8556
8557 static void
8558 do_t_adr (void)
8559 {
8560 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8561 {
8562 /* Defer to section relaxation. */
8563 inst.relax = inst.instruction;
8564 inst.instruction = THUMB_OP16 (inst.instruction);
8565 inst.instruction |= inst.operands[0].reg << 4;
8566 }
8567 else if (unified_syntax && inst.size_req != 2)
8568 {
8569 /* Generate a 32-bit opcode. */
8570 inst.instruction = THUMB_OP32 (inst.instruction);
8571 inst.instruction |= inst.operands[0].reg << 8;
8572 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8573 inst.reloc.pc_rel = 1;
8574 }
8575 else
8576 {
8577 /* Generate a 16-bit opcode. */
8578 inst.instruction = THUMB_OP16 (inst.instruction);
8579 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8580 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8581 inst.reloc.pc_rel = 1;
8582
8583 inst.instruction |= inst.operands[0].reg << 4;
8584 }
8585 }
8586
8587 /* Arithmetic instructions for which there is just one 16-bit
8588 instruction encoding, and it allows only two low registers.
8589 For maximal compatibility with ARM syntax, we allow three register
8590 operands even when Thumb-32 instructions are not available, as long
8591 as the first two are identical. For instance, both "sbc r0,r1" and
8592 "sbc r0,r0,r1" are allowed. */
8593 static void
8594 do_t_arit3 (void)
8595 {
8596 int Rd, Rs, Rn;
8597
8598 Rd = inst.operands[0].reg;
8599 Rs = (inst.operands[1].present
8600 ? inst.operands[1].reg /* Rd, Rs, foo */
8601 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8602 Rn = inst.operands[2].reg;
8603
8604 if (unified_syntax)
8605 {
8606 if (!inst.operands[2].isreg)
8607 {
8608 /* For an immediate, we always generate a 32-bit opcode;
8609 section relaxation will shrink it later if possible. */
8610 inst.instruction = THUMB_OP32 (inst.instruction);
8611 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8612 inst.instruction |= Rd << 8;
8613 inst.instruction |= Rs << 16;
8614 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8615 }
8616 else
8617 {
8618 bfd_boolean narrow;
8619
8620 /* See if we can do this with a 16-bit instruction. */
8621 if (THUMB_SETS_FLAGS (inst.instruction))
8622 narrow = current_it_mask == 0;
8623 else
8624 narrow = current_it_mask != 0;
8625
8626 if (Rd > 7 || Rn > 7 || Rs > 7)
8627 narrow = FALSE;
8628 if (inst.operands[2].shifted)
8629 narrow = FALSE;
8630 if (inst.size_req == 4)
8631 narrow = FALSE;
8632
8633 if (narrow
8634 && Rd == Rs)
8635 {
8636 inst.instruction = THUMB_OP16 (inst.instruction);
8637 inst.instruction |= Rd;
8638 inst.instruction |= Rn << 3;
8639 return;
8640 }
8641
8642 /* If we get here, it can't be done in 16 bits. */
8643 constraint (inst.operands[2].shifted
8644 && inst.operands[2].immisreg,
8645 _("shift must be constant"));
8646 inst.instruction = THUMB_OP32 (inst.instruction);
8647 inst.instruction |= Rd << 8;
8648 inst.instruction |= Rs << 16;
8649 encode_thumb32_shifted_operand (2);
8650 }
8651 }
8652 else
8653 {
8654 /* On its face this is a lie - the instruction does set the
8655 flags. However, the only supported mnemonic in this mode
8656 says it doesn't. */
8657 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8658
8659 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8660 _("unshifted register required"));
8661 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8662 constraint (Rd != Rs,
8663 _("dest and source1 must be the same register"));
8664
8665 inst.instruction = THUMB_OP16 (inst.instruction);
8666 inst.instruction |= Rd;
8667 inst.instruction |= Rn << 3;
8668 }
8669 }
8670
8671 /* Similarly, but for instructions where the arithmetic operation is
8672 commutative, so we can allow either of them to be different from
8673 the destination operand in a 16-bit instruction. For instance, all
8674 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8675 accepted. */
8676 static void
8677 do_t_arit3c (void)
8678 {
8679 int Rd, Rs, Rn;
8680
8681 Rd = inst.operands[0].reg;
8682 Rs = (inst.operands[1].present
8683 ? inst.operands[1].reg /* Rd, Rs, foo */
8684 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8685 Rn = inst.operands[2].reg;
8686
8687 if (unified_syntax)
8688 {
8689 if (!inst.operands[2].isreg)
8690 {
8691 /* For an immediate, we always generate a 32-bit opcode;
8692 section relaxation will shrink it later if possible. */
8693 inst.instruction = THUMB_OP32 (inst.instruction);
8694 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8695 inst.instruction |= Rd << 8;
8696 inst.instruction |= Rs << 16;
8697 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8698 }
8699 else
8700 {
8701 bfd_boolean narrow;
8702
8703 /* See if we can do this with a 16-bit instruction. */
8704 if (THUMB_SETS_FLAGS (inst.instruction))
8705 narrow = current_it_mask == 0;
8706 else
8707 narrow = current_it_mask != 0;
8708
8709 if (Rd > 7 || Rn > 7 || Rs > 7)
8710 narrow = FALSE;
8711 if (inst.operands[2].shifted)
8712 narrow = FALSE;
8713 if (inst.size_req == 4)
8714 narrow = FALSE;
8715
8716 if (narrow)
8717 {
8718 if (Rd == Rs)
8719 {
8720 inst.instruction = THUMB_OP16 (inst.instruction);
8721 inst.instruction |= Rd;
8722 inst.instruction |= Rn << 3;
8723 return;
8724 }
8725 if (Rd == Rn)
8726 {
8727 inst.instruction = THUMB_OP16 (inst.instruction);
8728 inst.instruction |= Rd;
8729 inst.instruction |= Rs << 3;
8730 return;
8731 }
8732 }
8733
8734 /* If we get here, it can't be done in 16 bits. */
8735 constraint (inst.operands[2].shifted
8736 && inst.operands[2].immisreg,
8737 _("shift must be constant"));
8738 inst.instruction = THUMB_OP32 (inst.instruction);
8739 inst.instruction |= Rd << 8;
8740 inst.instruction |= Rs << 16;
8741 encode_thumb32_shifted_operand (2);
8742 }
8743 }
8744 else
8745 {
8746 /* On its face this is a lie - the instruction does set the
8747 flags. However, the only supported mnemonic in this mode
8748 says it doesn't. */
8749 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8750
8751 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8752 _("unshifted register required"));
8753 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8754
8755 inst.instruction = THUMB_OP16 (inst.instruction);
8756 inst.instruction |= Rd;
8757
8758 if (Rd == Rs)
8759 inst.instruction |= Rn << 3;
8760 else if (Rd == Rn)
8761 inst.instruction |= Rs << 3;
8762 else
8763 constraint (1, _("dest must overlap one source register"));
8764 }
8765 }
8766
8767 static void
8768 do_t_barrier (void)
8769 {
8770 if (inst.operands[0].present)
8771 {
8772 constraint ((inst.instruction & 0xf0) != 0x40
8773 && inst.operands[0].imm != 0xf,
8774 "bad barrier type");
8775 inst.instruction |= inst.operands[0].imm;
8776 }
8777 else
8778 inst.instruction |= 0xf;
8779 }
8780
8781 static void
8782 do_t_bfc (void)
8783 {
8784 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8785 constraint (msb > 32, _("bit-field extends past end of register"));
8786 /* The instruction encoding stores the LSB and MSB,
8787 not the LSB and width. */
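/* E.g. "bfc r0, #4, #8": lsb = 4, width = 8, so msb = 12; the lsb is
   split into imm3:imm2 (001:00) and msb - 1 = 11 goes in bits 4:0.  */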
8788 inst.instruction |= inst.operands[0].reg << 8;
8789 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8790 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8791 inst.instruction |= msb - 1;
8792 }
8793
8794 static void
8795 do_t_bfi (void)
8796 {
8797 unsigned int msb;
8798
8799 /* #0 in second position is alternative syntax for bfc, which is
8800 the same instruction but with REG_PC in the Rm field. */
8801 if (!inst.operands[1].isreg)
8802 inst.operands[1].reg = REG_PC;
8803
8804 msb = inst.operands[2].imm + inst.operands[3].imm;
8805 constraint (msb > 32, _("bit-field extends past end of register"));
8806 /* The instruction encoding stores the LSB and MSB,
8807 not the LSB and width. */
8808 inst.instruction |= inst.operands[0].reg << 8;
8809 inst.instruction |= inst.operands[1].reg << 16;
8810 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8811 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8812 inst.instruction |= msb - 1;
8813 }
8814
8815 static void
8816 do_t_bfx (void)
8817 {
8818 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8819 _("bit-field extends past end of register"));
8820 inst.instruction |= inst.operands[0].reg << 8;
8821 inst.instruction |= inst.operands[1].reg << 16;
8822 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8823 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8824 inst.instruction |= inst.operands[3].imm - 1;
8825 }
8826
8827 /* ARM V5 Thumb BLX (argument parse)
8828 BLX <target_addr> which is BLX(1)
8829 BLX <Rm> which is BLX(2)
8830 Unfortunately, there are two different opcodes for this mnemonic.
8831 So, the insns[].value is not used, and the code here zaps values
8832 into inst.instruction.
8833
8834 ??? How to take advantage of the additional two bits of displacement
8835 available in Thumb32 mode? Need new relocation? */
8836
8837 static void
8838 do_t_blx (void)
8839 {
8840 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8841 if (inst.operands[0].isreg)
8842 /* We have a register, so this is BLX(2). */
8843 inst.instruction |= inst.operands[0].reg << 3;
8844 else
8845 {
8846 /* No register. This must be BLX(1). */
8847 inst.instruction = 0xf000e800;
8848 #ifdef OBJ_ELF
8849 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8850 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8851 else
8852 #endif
8853 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8854 inst.reloc.pc_rel = 1;
8855 }
8856 }
8857
8858 static void
8859 do_t_branch (void)
8860 {
8861 int opcode;
8862 int cond;
8863
8864 if (current_it_mask)
8865 {
8866 /* Conditional branches inside IT blocks are encoded as unconditional
8867 branches. */
8868 cond = COND_ALWAYS;
8869 /* A branch must be the last instruction in an IT block. */
8870 constraint (current_it_mask != 0x10, BAD_BRANCH);
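/* E.g. after "it eq", a following "beq label" is emitted here as an
   unconditional B; its EQ condition is supplied by the IT block.  */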
8871 }
8872 else
8873 cond = inst.cond;
8874
8875 if (cond != COND_ALWAYS)
8876 opcode = T_MNEM_bcond;
8877 else
8878 opcode = inst.instruction;
8879
8880 if (unified_syntax && inst.size_req == 4)
8881 {
8882 inst.instruction = THUMB_OP32 (opcode);
8883 if (cond == COND_ALWAYS)
8884 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8885 else
8886 {
8887 assert (cond != 0xF);
8888 inst.instruction |= cond << 22;
8889 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8890 }
8891 }
8892 else
8893 {
8894 inst.instruction = THUMB_OP16 (opcode);
8895 if (cond == COND_ALWAYS)
8896 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8897 else
8898 {
8899 inst.instruction |= cond << 8;
8900 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8901 }
8902 /* Allow section relaxation. */
8903 if (unified_syntax && inst.size_req != 2)
8904 inst.relax = opcode;
8905 }
8906
8907 inst.reloc.pc_rel = 1;
8908 }
8909
8910 static void
8911 do_t_bkpt (void)
8912 {
8913 constraint (inst.cond != COND_ALWAYS,
8914 _("instruction is always unconditional"));
8915 if (inst.operands[0].present)
8916 {
8917 constraint (inst.operands[0].imm > 255,
8918 _("immediate value out of range"));
8919 inst.instruction |= inst.operands[0].imm;
8920 }
8921 }
8922
8923 static void
8924 do_t_branch23 (void)
8925 {
8926 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8927 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8928 inst.reloc.pc_rel = 1;
8929
8930 /* If the destination of the branch is a defined symbol which does not have
8931 the THUMB_FUNC attribute, then we must be calling a function which has
8932 the (interfacearm) attribute. We look for the Thumb entry point to that
8933 function and change the branch to refer to that function instead. */
8934 if ( inst.reloc.exp.X_op == O_symbol
8935 && inst.reloc.exp.X_add_symbol != NULL
8936 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8937 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8938 inst.reloc.exp.X_add_symbol =
8939 find_real_start (inst.reloc.exp.X_add_symbol);
8940 }
8941
8942 static void
8943 do_t_bx (void)
8944 {
8945 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8946 inst.instruction |= inst.operands[0].reg << 3;
8947 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8948 should cause the alignment to be checked once it is known. This is
8949 because BX PC only works if the instruction is word aligned. */
8950 }
8951
8952 static void
8953 do_t_bxj (void)
8954 {
8955 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8956 if (inst.operands[0].reg == REG_PC)
8957 as_tsktsk (_("use of r15 in bxj is not really useful"));
8958
8959 inst.instruction |= inst.operands[0].reg << 16;
8960 }
8961
8962 static void
8963 do_t_clz (void)
8964 {
8965 inst.instruction |= inst.operands[0].reg << 8;
8966 inst.instruction |= inst.operands[1].reg << 16;
8967 inst.instruction |= inst.operands[1].reg;
8968 }
8969
8970 static void
8971 do_t_cps (void)
8972 {
8973 constraint (current_it_mask, BAD_NOT_IT);
8974 inst.instruction |= inst.operands[0].imm;
8975 }
8976
8977 static void
8978 do_t_cpsi (void)
8979 {
8980 constraint (current_it_mask, BAD_NOT_IT);
8981 if (unified_syntax
8982 && (inst.operands[1].present || inst.size_req == 4)
8983 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8984 {
8985 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8986 inst.instruction = 0xf3af8000;
8987 inst.instruction |= imod << 9;
8988 inst.instruction |= inst.operands[0].imm << 5;
8989 if (inst.operands[1].present)
8990 inst.instruction |= 0x100 | inst.operands[1].imm;
8991 }
8992 else
8993 {
8994 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8995 && (inst.operands[0].imm & 4),
8996 _("selected processor does not support 'A' form "
8997 "of this instruction"));
8998 constraint (inst.operands[1].present || inst.size_req == 4,
8999 _("Thumb does not support the 2-argument "
9000 "form of this instruction"));
9001 inst.instruction |= inst.operands[0].imm;
9002 }
9003 }
9004
9005 /* THUMB CPY instruction (argument parse). */
9006
9007 static void
9008 do_t_cpy (void)
9009 {
9010 if (inst.size_req == 4)
9011 {
9012 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9013 inst.instruction |= inst.operands[0].reg << 8;
9014 inst.instruction |= inst.operands[1].reg;
9015 }
9016 else
9017 {
9018 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9019 inst.instruction |= (inst.operands[0].reg & 0x7);
9020 inst.instruction |= inst.operands[1].reg << 3;
9021 }
9022 }
9023
9024 static void
9025 do_t_cbz (void)
9026 {
9027 constraint (current_it_mask, BAD_NOT_IT);
9028 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9029 inst.instruction |= inst.operands[0].reg;
9030 inst.reloc.pc_rel = 1;
9031 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9032 }
9033
9034 static void
9035 do_t_dbg (void)
9036 {
9037 inst.instruction |= inst.operands[0].imm;
9038 }
9039
9040 static void
9041 do_t_div (void)
9042 {
9043 if (!inst.operands[1].present)
9044 inst.operands[1].reg = inst.operands[0].reg;
9045 inst.instruction |= inst.operands[0].reg << 8;
9046 inst.instruction |= inst.operands[1].reg << 16;
9047 inst.instruction |= inst.operands[2].reg;
9048 }
9049
9050 static void
9051 do_t_hint (void)
9052 {
9053 if (unified_syntax && inst.size_req == 4)
9054 inst.instruction = THUMB_OP32 (inst.instruction);
9055 else
9056 inst.instruction = THUMB_OP16 (inst.instruction);
9057 }
9058
9059 static void
9060 do_t_it (void)
9061 {
9062 unsigned int cond = inst.operands[0].imm;
9063
9064 constraint (current_it_mask, BAD_NOT_IT);
9065 current_it_mask = (inst.instruction & 0xf) | 0x10;
9066 current_cc = cond;
9067
9068 /* If the condition is a negative condition, invert the mask. */
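/* Illustrative walk-through, assuming the opcode table encodes T as 1
   and E as 0: "ite eq" arrives here with mask 0100; EQ is 0000, so its
   LSB is 0 and the branch below flips bit 3, giving the architectural
   mask 1100.  */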
9069 if ((cond & 0x1) == 0x0)
9070 {
9071 unsigned int mask = inst.instruction & 0x000f;
9072
9073 if ((mask & 0x7) == 0)
9074 /* no conversion needed */;
9075 else if ((mask & 0x3) == 0)
9076 mask ^= 0x8;
9077 else if ((mask & 0x1) == 0)
9078 mask ^= 0xC;
9079 else
9080 mask ^= 0xE;
9081
9082 inst.instruction &= 0xfff0;
9083 inst.instruction |= mask;
9084 }
9085
9086 inst.instruction |= cond << 4;
9087 }
9088
9089 /* Helper function used for both push/pop and ldm/stm. */
9090 static void
9091 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9092 {
9093 bfd_boolean load;
9094
9095 load = (inst.instruction & (1 << 20)) != 0;
9096
9097 if (mask & (1 << 13))
9098 inst.error = _("SP not allowed in register list");
9099 if (load)
9100 {
9101 if (mask & (1 << 14)
9102 && mask & (1 << 15))
9103 inst.error = _("LR and PC should not both be in register list");
9104
9105 if ((mask & (1 << base)) != 0
9106 && writeback)
9107 as_warn (_("base register should not be in register list "
9108 "when written back"));
9109 }
9110 else
9111 {
9112 if (mask & (1 << 15))
9113 inst.error = _("PC not allowed in register list");
9114
9115 if (mask & (1 << base))
9116 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9117 }
9118
9119 if ((mask & (mask - 1)) == 0)
9120 {
9121 /* Single register transfers implemented as str/ldr. */
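/* For example, "ldmia.w r1!, {r5}" ends up as "ldr r5, [r1], #4"
   (0xf8515b04), and a single-register "stmdb r1!, {r5}" as
   "str r5, [r1, #-4]!".  */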
9122 if (writeback)
9123 {
9124 if (inst.instruction & (1 << 23))
9125 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9126 else
9127 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9128 }
9129 else
9130 {
9131 if (inst.instruction & (1 << 23))
9132 inst.instruction = 0x00800000; /* ia -> [base] */
9133 else
9134 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9135 }
9136
9137 inst.instruction |= 0xf8400000;
9138 if (load)
9139 inst.instruction |= 0x00100000;
9140
9141 mask = ffs (mask) - 1;
9142 mask <<= 12;
9143 }
9144 else if (writeback)
9145 inst.instruction |= WRITE_BACK;
9146
9147 inst.instruction |= mask;
9148 inst.instruction |= base << 16;
9149 }
9150
9151 static void
9152 do_t_ldmstm (void)
9153 {
9154 /* This really doesn't seem worth it. */
9155 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9156 _("expression too complex"));
9157 constraint (inst.operands[1].writeback,
9158 _("Thumb load/store multiple does not support {reglist}^"));
9159
9160 if (unified_syntax)
9161 {
9162 bfd_boolean narrow;
9163 unsigned mask;
9164
9165 narrow = FALSE;
9166 /* See if we can use a 16-bit instruction. */
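/* E.g. "stmia r3!, {r0-r2}" or "ldmia r3!, {r0-r2}" (base not in the
   list, so writeback is required), or "ldmia r3, {r1-r3}" (base in the
   list, no writeback), can all use the 16-bit encoding; anything else
   falls through to the 32-bit form below.  */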
9167 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9168 && inst.size_req != 4
9169 && !(inst.operands[1].imm & ~0xff))
9170 {
9171 mask = 1 << inst.operands[0].reg;
9172
9173 if (inst.operands[0].reg <= 7
9174 && (inst.instruction == T_MNEM_stmia
9175 ? inst.operands[0].writeback
9176 : (inst.operands[0].writeback
9177 == !(inst.operands[1].imm & mask))))
9178 {
9179 if (inst.instruction == T_MNEM_stmia
9180 && (inst.operands[1].imm & mask)
9181 && (inst.operands[1].imm & (mask - 1)))
9182 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9183 inst.operands[0].reg);
9184
9185 inst.instruction = THUMB_OP16 (inst.instruction);
9186 inst.instruction |= inst.operands[0].reg << 8;
9187 inst.instruction |= inst.operands[1].imm;
9188 narrow = TRUE;
9189 }
9190 else if (inst.operands[0].reg == REG_SP
9191 && inst.operands[0].writeback)
9192 {
9193 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9194 ? T_MNEM_push : T_MNEM_pop);
9195 inst.instruction |= inst.operands[1].imm;
9196 narrow = TRUE;
9197 }
9198 }
9199
9200 if (!narrow)
9201 {
9202 if (inst.instruction < 0xffff)
9203 inst.instruction = THUMB_OP32 (inst.instruction);
9204
9205 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9206 inst.operands[0].writeback);
9207 }
9208 }
9209 else
9210 {
9211 constraint (inst.operands[0].reg > 7
9212 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9213 constraint (inst.instruction != T_MNEM_ldmia
9214 && inst.instruction != T_MNEM_stmia,
9215 _("Thumb-2 instruction only valid in unified syntax"));
9216 if (inst.instruction == T_MNEM_stmia)
9217 {
9218 if (!inst.operands[0].writeback)
9219 as_warn (_("this instruction will write back the base register"));
9220 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9221 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9222 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9223 inst.operands[0].reg);
9224 }
9225 else
9226 {
9227 if (!inst.operands[0].writeback
9228 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9229 as_warn (_("this instruction will write back the base register"));
9230 else if (inst.operands[0].writeback
9231 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9232 as_warn (_("this instruction will not write back the base register"));
9233 }
9234
9235 inst.instruction = THUMB_OP16 (inst.instruction);
9236 inst.instruction |= inst.operands[0].reg << 8;
9237 inst.instruction |= inst.operands[1].imm;
9238 }
9239 }
9240
9241 static void
9242 do_t_ldrex (void)
9243 {
9244 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9245 || inst.operands[1].postind || inst.operands[1].writeback
9246 || inst.operands[1].immisreg || inst.operands[1].shifted
9247 || inst.operands[1].negative,
9248 BAD_ADDR_MODE);
9249
9250 inst.instruction |= inst.operands[0].reg << 12;
9251 inst.instruction |= inst.operands[1].reg << 16;
9252 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9253 }
9254
9255 static void
9256 do_t_ldrexd (void)
9257 {
9258 if (!inst.operands[1].present)
9259 {
9260 constraint (inst.operands[0].reg == REG_LR,
9261 _("r14 not allowed as first register "
9262 "when second register is omitted"));
9263 inst.operands[1].reg = inst.operands[0].reg + 1;
9264 }
9265 constraint (inst.operands[0].reg == inst.operands[1].reg,
9266 BAD_OVERLAP);
9267
9268 inst.instruction |= inst.operands[0].reg << 12;
9269 inst.instruction |= inst.operands[1].reg << 8;
9270 inst.instruction |= inst.operands[2].reg << 16;
9271 }
9272
9273 static void
9274 do_t_ldst (void)
9275 {
9276 unsigned long opcode;
9277 int Rn;
9278
9279 opcode = inst.instruction;
9280 if (unified_syntax)
9281 {
9282 if (!inst.operands[1].isreg)
9283 {
9284 if (opcode <= 0xffff)
9285 inst.instruction = THUMB_OP32 (opcode);
9286 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9287 return;
9288 }
9289 if (inst.operands[1].isreg
9290 && !inst.operands[1].writeback
9291 && !inst.operands[1].shifted && !inst.operands[1].postind
9292 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9293 && opcode <= 0xffff
9294 && inst.size_req != 4)
9295 {
9296 /* Insn may have a 16-bit form. */
9297 Rn = inst.operands[1].reg;
9298 if (inst.operands[1].immisreg)
9299 {
9300 inst.instruction = THUMB_OP16 (opcode);
9301 /* [Rn, Ri] */
9302 if (Rn <= 7 && inst.operands[1].imm <= 7)
9303 goto op16;
9304 }
9305 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9306 && opcode != T_MNEM_ldrsb)
9307 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9308 || (Rn == REG_SP && opcode == T_MNEM_str))
9309 {
9310 /* [Rn, #const] */
9311 if (Rn > 7)
9312 {
9313 if (Rn == REG_PC)
9314 {
9315 if (inst.reloc.pc_rel)
9316 opcode = T_MNEM_ldr_pc2;
9317 else
9318 opcode = T_MNEM_ldr_pc;
9319 }
9320 else
9321 {
9322 if (opcode == T_MNEM_ldr)
9323 opcode = T_MNEM_ldr_sp;
9324 else
9325 opcode = T_MNEM_str_sp;
9326 }
9327 inst.instruction = inst.operands[0].reg << 8;
9328 }
9329 else
9330 {
9331 inst.instruction = inst.operands[0].reg;
9332 inst.instruction |= inst.operands[1].reg << 3;
9333 }
9334 inst.instruction |= THUMB_OP16 (opcode);
9335 if (inst.size_req == 2)
9336 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9337 else
9338 inst.relax = opcode;
9339 return;
9340 }
9341 }
9342 /* Definitely a 32-bit variant. */
9343 inst.instruction = THUMB_OP32 (opcode);
9344 inst.instruction |= inst.operands[0].reg << 12;
9345 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9346 return;
9347 }
9348
9349 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9350
9351 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9352 {
9353 /* Only [Rn,Rm] is acceptable. */
9354 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9355 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9356 || inst.operands[1].postind || inst.operands[1].shifted
9357 || inst.operands[1].negative,
9358 _("Thumb does not support this addressing mode"));
9359 inst.instruction = THUMB_OP16 (inst.instruction);
9360 goto op16;
9361 }
9362
9363 inst.instruction = THUMB_OP16 (inst.instruction);
9364 if (!inst.operands[1].isreg)
9365 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9366 return;
9367
9368 constraint (!inst.operands[1].preind
9369 || inst.operands[1].shifted
9370 || inst.operands[1].writeback,
9371 _("Thumb does not support this addressing mode"));
9372 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9373 {
9374 constraint (inst.instruction & 0x0600,
9375 _("byte or halfword not valid for base register"));
9376 constraint (inst.operands[1].reg == REG_PC
9377 && !(inst.instruction & THUMB_LOAD_BIT),
9378 _("r15 based store not allowed"));
9379 constraint (inst.operands[1].immisreg,
9380 _("invalid base register for register offset"));
9381
9382 if (inst.operands[1].reg == REG_PC)
9383 inst.instruction = T_OPCODE_LDR_PC;
9384 else if (inst.instruction & THUMB_LOAD_BIT)
9385 inst.instruction = T_OPCODE_LDR_SP;
9386 else
9387 inst.instruction = T_OPCODE_STR_SP;
9388
9389 inst.instruction |= inst.operands[0].reg << 8;
9390 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9391 return;
9392 }
9393
9394 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9395 if (!inst.operands[1].immisreg)
9396 {
9397 /* Immediate offset. */
9398 inst.instruction |= inst.operands[0].reg;
9399 inst.instruction |= inst.operands[1].reg << 3;
9400 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9401 return;
9402 }
9403
9404 /* Register offset. */
9405 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9406 constraint (inst.operands[1].negative,
9407 _("Thumb does not support this addressing mode"));
9408
9409 op16:
9410 switch (inst.instruction)
9411 {
9412 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9413 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9414 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9415 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9416 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9417 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9418 case 0x5600 /* ldrsb */:
9419 case 0x5e00 /* ldrsh */: break;
9420 default: abort ();
9421 }
9422
9423 inst.instruction |= inst.operands[0].reg;
9424 inst.instruction |= inst.operands[1].reg << 3;
9425 inst.instruction |= inst.operands[1].imm << 6;
9426 }
9427
9428 static void
9429 do_t_ldstd (void)
9430 {
9431 if (!inst.operands[1].present)
9432 {
9433 inst.operands[1].reg = inst.operands[0].reg + 1;
9434 constraint (inst.operands[0].reg == REG_LR,
9435 _("r14 not allowed here"));
9436 }
9437 inst.instruction |= inst.operands[0].reg << 12;
9438 inst.instruction |= inst.operands[1].reg << 8;
9439 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9440
9441 }
9442
9443 static void
9444 do_t_ldstt (void)
9445 {
9446 inst.instruction |= inst.operands[0].reg << 12;
9447 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9448 }
9449
9450 static void
9451 do_t_mla (void)
9452 {
9453 inst.instruction |= inst.operands[0].reg << 8;
9454 inst.instruction |= inst.operands[1].reg << 16;
9455 inst.instruction |= inst.operands[2].reg;
9456 inst.instruction |= inst.operands[3].reg << 12;
9457 }
9458
9459 static void
9460 do_t_mlal (void)
9461 {
9462 inst.instruction |= inst.operands[0].reg << 12;
9463 inst.instruction |= inst.operands[1].reg << 8;
9464 inst.instruction |= inst.operands[2].reg << 16;
9465 inst.instruction |= inst.operands[3].reg;
9466 }
9467
9468 static void
9469 do_t_mov_cmp (void)
9470 {
9471 if (unified_syntax)
9472 {
9473 int r0off = (inst.instruction == T_MNEM_mov
9474 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9475 unsigned long opcode;
9476 bfd_boolean narrow;
9477 bfd_boolean low_regs;
9478
9479 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9480 opcode = inst.instruction;
9481 if (current_it_mask)
9482 narrow = opcode != T_MNEM_movs;
9483 else
9484 narrow = opcode != T_MNEM_movs || low_regs;
9485 if (inst.size_req == 4
9486 || inst.operands[1].shifted)
9487 narrow = FALSE;
9488
9489 if (!inst.operands[1].isreg)
9490 {
9491 /* Immediate operand. */
9492 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9493 narrow = 0;
9494 if (low_regs && narrow)
9495 {
9496 inst.instruction = THUMB_OP16 (opcode);
9497 inst.instruction |= inst.operands[0].reg << 8;
9498 if (inst.size_req == 2)
9499 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9500 else
9501 inst.relax = opcode;
9502 }
9503 else
9504 {
9505 inst.instruction = THUMB_OP32 (inst.instruction);
9506 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9507 inst.instruction |= inst.operands[0].reg << r0off;
9508 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9509 }
9510 }
9511 else if (!narrow)
9512 {
9513 inst.instruction = THUMB_OP32 (inst.instruction);
9514 inst.instruction |= inst.operands[0].reg << r0off;
9515 encode_thumb32_shifted_operand (1);
9516 }
9517 else
9518 switch (inst.instruction)
9519 {
9520 case T_MNEM_mov:
9521 inst.instruction = T_OPCODE_MOV_HR;
9522 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9523 inst.instruction |= (inst.operands[0].reg & 0x7);
9524 inst.instruction |= inst.operands[1].reg << 3;
9525 break;
9526
9527 case T_MNEM_movs:
9528 /* We know we have low registers at this point.
9529 Generate ADD Rd, Rs, #0. */
9530 inst.instruction = T_OPCODE_ADD_I3;
9531 inst.instruction |= inst.operands[0].reg;
9532 inst.instruction |= inst.operands[1].reg << 3;
9533 break;
9534
9535 case T_MNEM_cmp:
9536 if (low_regs)
9537 {
9538 inst.instruction = T_OPCODE_CMP_LR;
9539 inst.instruction |= inst.operands[0].reg;
9540 inst.instruction |= inst.operands[1].reg << 3;
9541 }
9542 else
9543 {
9544 inst.instruction = T_OPCODE_CMP_HR;
9545 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9546 inst.instruction |= (inst.operands[0].reg & 0x7);
9547 inst.instruction |= inst.operands[1].reg << 3;
9548 }
9549 break;
9550 }
9551 return;
9552 }
9553
9554 inst.instruction = THUMB_OP16 (inst.instruction);
9555 if (inst.operands[1].isreg)
9556 {
9557 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9558 {
9559 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9560 since a MOV instruction produces unpredictable results. */
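/* E.g. in divided syntax "mov r2, r3" is emitted as 0x1c1a, the
   "add r2, r3, #0" encoding.  */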
9561 if (inst.instruction == T_OPCODE_MOV_I8)
9562 inst.instruction = T_OPCODE_ADD_I3;
9563 else
9564 inst.instruction = T_OPCODE_CMP_LR;
9565
9566 inst.instruction |= inst.operands[0].reg;
9567 inst.instruction |= inst.operands[1].reg << 3;
9568 }
9569 else
9570 {
9571 if (inst.instruction == T_OPCODE_MOV_I8)
9572 inst.instruction = T_OPCODE_MOV_HR;
9573 else
9574 inst.instruction = T_OPCODE_CMP_HR;
9575 do_t_cpy ();
9576 }
9577 }
9578 else
9579 {
9580 constraint (inst.operands[0].reg > 7,
9581 _("only lo regs allowed with immediate"));
9582 inst.instruction |= inst.operands[0].reg << 8;
9583 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9584 }
9585 }
9586
9587 static void
9588 do_t_mov16 (void)
9589 {
9590 bfd_vma imm;
9591 bfd_boolean top;
9592
9593 top = (inst.instruction & 0x00800000) != 0;
9594 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9595 {
9596 constraint (top, _(":lower16: not allowed in this instruction"));
9597 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9598 }
9599 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9600 {
9601 constraint (!top, _(":upper16: not allowed in this instruction"));
9602 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9603 }
9604
9605 inst.instruction |= inst.operands[0].reg << 8;
9606 if (inst.reloc.type == BFD_RELOC_UNUSED)
9607 {
9608 imm = inst.reloc.exp.X_add_number;
9609 inst.instruction |= (imm & 0xf000) << 4;
9610 inst.instruction |= (imm & 0x0800) << 15;
9611 inst.instruction |= (imm & 0x0700) << 4;
9612 inst.instruction |= (imm & 0x00ff);
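/* E.g. #0x1234 is split into imm4 = 1 (bits 19:16), i = 0 (bit 26),
   imm3 = 2 (bits 14:12) and imm8 = 0x34, per the masks above.  */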
9613 }
9614 }
9615
9616 static void
9617 do_t_mvn_tst (void)
9618 {
9619 if (unified_syntax)
9620 {
9621 int r0off = (inst.instruction == T_MNEM_mvn
9622 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9623 bfd_boolean narrow;
9624
9625 if (inst.size_req == 4
9626 || inst.instruction > 0xffff
9627 || inst.operands[1].shifted
9628 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9629 narrow = FALSE;
9630 else if (inst.instruction == T_MNEM_cmn)
9631 narrow = TRUE;
9632 else if (THUMB_SETS_FLAGS (inst.instruction))
9633 narrow = (current_it_mask == 0);
9634 else
9635 narrow = (current_it_mask != 0);
9636
9637 if (!inst.operands[1].isreg)
9638 {
9639 /* For an immediate, we always generate a 32-bit opcode;
9640 section relaxation will shrink it later if possible. */
9641 if (inst.instruction < 0xffff)
9642 inst.instruction = THUMB_OP32 (inst.instruction);
9643 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9644 inst.instruction |= inst.operands[0].reg << r0off;
9645 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9646 }
9647 else
9648 {
9649 /* See if we can do this with a 16-bit instruction. */
9650 if (narrow)
9651 {
9652 inst.instruction = THUMB_OP16 (inst.instruction);
9653 inst.instruction |= inst.operands[0].reg;
9654 inst.instruction |= inst.operands[1].reg << 3;
9655 }
9656 else
9657 {
9658 constraint (inst.operands[1].shifted
9659 && inst.operands[1].immisreg,
9660 _("shift must be constant"));
9661 if (inst.instruction < 0xffff)
9662 inst.instruction = THUMB_OP32 (inst.instruction);
9663 inst.instruction |= inst.operands[0].reg << r0off;
9664 encode_thumb32_shifted_operand (1);
9665 }
9666 }
9667 }
9668 else
9669 {
9670 constraint (inst.instruction > 0xffff
9671 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9672 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9673 _("unshifted register required"));
9674 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9675 BAD_HIREG);
9676
9677 inst.instruction = THUMB_OP16 (inst.instruction);
9678 inst.instruction |= inst.operands[0].reg;
9679 inst.instruction |= inst.operands[1].reg << 3;
9680 }
9681 }
9682
9683 static void
9684 do_t_mrs (void)
9685 {
9686 int flags;
9687
9688 if (do_vfp_nsyn_mrs () == SUCCESS)
9689 return;
9690
9691 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9692 if (flags == 0)
9693 {
9694 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9695 _("selected processor does not support "
9696 "requested special purpose register"));
9697 }
9698 else
9699 {
9700 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9701 _("selected processor does not support "
9702 "requested special purpose register %x"));
9703 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9704 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9705 _("'CPSR' or 'SPSR' expected"));
9706 }
9707
9708 inst.instruction |= inst.operands[0].reg << 8;
9709 inst.instruction |= (flags & SPSR_BIT) >> 2;
9710 inst.instruction |= inst.operands[1].imm & 0xff;
9711 }
9712
9713 static void
9714 do_t_msr (void)
9715 {
9716 int flags;
9717
9718 if (do_vfp_nsyn_msr () == SUCCESS)
9719 return;
9720
9721 constraint (!inst.operands[1].isreg,
9722 _("Thumb encoding does not support an immediate here"));
9723 flags = inst.operands[0].imm;
9724 if (flags & ~0xff)
9725 {
9726 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9727 _("selected processor does not support "
9728 "requested special purpose register"));
9729 }
9730 else
9731 {
9732 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9733 _("selected processor does not support "
9734 "requested special purpose register"));
9735 flags |= PSR_f;
9736 }
9737 inst.instruction |= (flags & SPSR_BIT) >> 2;
9738 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9739 inst.instruction |= (flags & 0xff);
9740 inst.instruction |= inst.operands[1].reg << 16;
9741 }
9742
9743 static void
9744 do_t_mul (void)
9745 {
9746 if (!inst.operands[2].present)
9747 inst.operands[2].reg = inst.operands[0].reg;
9748
9749 /* There is no 32-bit MULS and no 16-bit MUL. */
9750 if (unified_syntax && inst.instruction == T_MNEM_mul)
9751 {
9752 inst.instruction = THUMB_OP32 (inst.instruction);
9753 inst.instruction |= inst.operands[0].reg << 8;
9754 inst.instruction |= inst.operands[1].reg << 16;
9755 inst.instruction |= inst.operands[2].reg << 0;
9756 }
9757 else
9758 {
9759 constraint (!unified_syntax
9760 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9761 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9762 BAD_HIREG);
9763
9764 inst.instruction = THUMB_OP16 (inst.instruction);
9765 inst.instruction |= inst.operands[0].reg;
9766
9767 if (inst.operands[0].reg == inst.operands[1].reg)
9768 inst.instruction |= inst.operands[2].reg << 3;
9769 else if (inst.operands[0].reg == inst.operands[2].reg)
9770 inst.instruction |= inst.operands[1].reg << 3;
9771 else
9772 constraint (1, _("dest must overlap one source register"));
9773 }
9774 }
9775
9776 static void
9777 do_t_mull (void)
9778 {
9779 inst.instruction |= inst.operands[0].reg << 12;
9780 inst.instruction |= inst.operands[1].reg << 8;
9781 inst.instruction |= inst.operands[2].reg << 16;
9782 inst.instruction |= inst.operands[3].reg;
9783
9784 if (inst.operands[0].reg == inst.operands[1].reg)
9785 as_tsktsk (_("rdhi and rdlo must be different"));
9786 }
9787
9788 static void
9789 do_t_nop (void)
9790 {
9791 if (unified_syntax)
9792 {
9793 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9794 {
9795 inst.instruction = THUMB_OP32 (inst.instruction);
9796 inst.instruction |= inst.operands[0].imm;
9797 }
9798 else
9799 {
9800 inst.instruction = THUMB_OP16 (inst.instruction);
9801 inst.instruction |= inst.operands[0].imm << 4;
9802 }
9803 }
9804 else
9805 {
9806 constraint (inst.operands[0].present,
9807 _("Thumb does not support NOP with hints"));
9808 inst.instruction = 0x46c0;
9809 }
9810 }
9811
9812 static void
9813 do_t_neg (void)
9814 {
9815 if (unified_syntax)
9816 {
9817 bfd_boolean narrow;
9818
9819 if (THUMB_SETS_FLAGS (inst.instruction))
9820 narrow = (current_it_mask == 0);
9821 else
9822 narrow = (current_it_mask != 0);
9823 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9824 narrow = FALSE;
9825 if (inst.size_req == 4)
9826 narrow = FALSE;
9827
9828 if (!narrow)
9829 {
9830 inst.instruction = THUMB_OP32 (inst.instruction);
9831 inst.instruction |= inst.operands[0].reg << 8;
9832 inst.instruction |= inst.operands[1].reg << 16;
9833 }
9834 else
9835 {
9836 inst.instruction = THUMB_OP16 (inst.instruction);
9837 inst.instruction |= inst.operands[0].reg;
9838 inst.instruction |= inst.operands[1].reg << 3;
9839 }
9840 }
9841 else
9842 {
9843 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9844 BAD_HIREG);
9845 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9846
9847 inst.instruction = THUMB_OP16 (inst.instruction);
9848 inst.instruction |= inst.operands[0].reg;
9849 inst.instruction |= inst.operands[1].reg << 3;
9850 }
9851 }
9852
9853 static void
9854 do_t_pkhbt (void)
9855 {
9856 inst.instruction |= inst.operands[0].reg << 8;
9857 inst.instruction |= inst.operands[1].reg << 16;
9858 inst.instruction |= inst.operands[2].reg;
9859 if (inst.operands[3].present)
9860 {
9861 unsigned int val = inst.reloc.exp.X_add_number;
9862 constraint (inst.reloc.exp.X_op != O_constant,
9863 _("expression too complex"));
9864 inst.instruction |= (val & 0x1c) << 10;
9865 inst.instruction |= (val & 0x03) << 6;
9866 }
9867 }
9868
9869 static void
9870 do_t_pkhtb (void)
9871 {
9872 if (!inst.operands[3].present)
9873 inst.instruction &= ~0x00000020;
9874 do_t_pkhbt ();
9875 }
9876
9877 static void
9878 do_t_pld (void)
9879 {
9880 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9881 }
9882
9883 static void
9884 do_t_push_pop (void)
9885 {
9886 unsigned mask;
9887
9888 constraint (inst.operands[0].writeback,
9889 _("push/pop do not support {reglist}^"));
9890 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9891 _("expression too complex"));
9892
9893 mask = inst.operands[0].imm;
9894 if ((mask & ~0xff) == 0)
9895 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
9896 else if ((inst.instruction == T_MNEM_push
9897 && (mask & ~0xff) == 1 << REG_LR)
9898 || (inst.instruction == T_MNEM_pop
9899 && (mask & ~0xff) == 1 << REG_PC))
9900 {
9901 inst.instruction = THUMB_OP16 (inst.instruction);
9902 inst.instruction |= THUMB_PP_PC_LR;
9903 inst.instruction |= mask & 0xff;
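/* E.g. "push {r4-r7, lr}" takes this path and assembles to 0xb5f0;
   "pop {r4-r7, pc}" to 0xbdf0.  */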
9904 }
9905 else if (unified_syntax)
9906 {
9907 inst.instruction = THUMB_OP32 (inst.instruction);
9908 encode_thumb2_ldmstm (13, mask, TRUE);
9909 }
9910 else
9911 {
9912 inst.error = _("invalid register list to push/pop instruction");
9913 return;
9914 }
9915 }
9916
9917 static void
9918 do_t_rbit (void)
9919 {
9920 inst.instruction |= inst.operands[0].reg << 8;
9921 inst.instruction |= inst.operands[1].reg << 16;
9922 }
9923
9924 static void
9925 do_t_rev (void)
9926 {
9927 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9928 && inst.size_req != 4)
9929 {
9930 inst.instruction = THUMB_OP16 (inst.instruction);
9931 inst.instruction |= inst.operands[0].reg;
9932 inst.instruction |= inst.operands[1].reg << 3;
9933 }
9934 else if (unified_syntax)
9935 {
9936 inst.instruction = THUMB_OP32 (inst.instruction);
9937 inst.instruction |= inst.operands[0].reg << 8;
9938 inst.instruction |= inst.operands[1].reg << 16;
9939 inst.instruction |= inst.operands[1].reg;
9940 }
9941 else
9942 inst.error = BAD_HIREG;
9943 }
9944
9945 static void
9946 do_t_rsb (void)
9947 {
9948 int Rd, Rs;
9949
9950 Rd = inst.operands[0].reg;
9951 Rs = (inst.operands[1].present
9952 ? inst.operands[1].reg /* Rd, Rs, foo */
9953 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9954
9955 inst.instruction |= Rd << 8;
9956 inst.instruction |= Rs << 16;
9957 if (!inst.operands[2].isreg)
9958 {
9959 bfd_boolean narrow;
9960
9961 if ((inst.instruction & 0x00100000) != 0)
9962 narrow = (current_it_mask == 0);
9963 else
9964 narrow = (current_it_mask != 0);
9965
9966 if (Rd > 7 || Rs > 7)
9967 narrow = FALSE;
9968
9969 if (inst.size_req == 4 || !unified_syntax)
9970 narrow = FALSE;
9971
9972 if (inst.reloc.exp.X_op != O_constant
9973 || inst.reloc.exp.X_add_number != 0)
9974 narrow = FALSE;
9975
9976 /* Turn rsb #0 into 16-bit neg. We should probably do this via
9977 relaxation, but it doesn't seem worth the hassle. */
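/* E.g. in unified syntax, outside an IT block, "rsbs r0, r1, #0" with
   no .w qualifier becomes the 16-bit "negs r0, r1" (0x4248).  */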
9978 if (narrow)
9979 {
9980 inst.reloc.type = BFD_RELOC_UNUSED;
9981 inst.instruction = THUMB_OP16 (T_MNEM_negs);
9982 inst.instruction |= Rs << 3;
9983 inst.instruction |= Rd;
9984 }
9985 else
9986 {
9987 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9988 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9989 }
9990 }
9991 else
9992 encode_thumb32_shifted_operand (2);
9993 }
9994
9995 static void
9996 do_t_setend (void)
9997 {
9998 constraint (current_it_mask, BAD_NOT_IT);
9999 if (inst.operands[0].imm)
10000 inst.instruction |= 0x8;
10001 }
10002
10003 static void
10004 do_t_shift (void)
10005 {
10006 if (!inst.operands[1].present)
10007 inst.operands[1].reg = inst.operands[0].reg;
10008
10009 if (unified_syntax)
10010 {
10011 bfd_boolean narrow;
10012 int shift_kind;
10013
10014 switch (inst.instruction)
10015 {
10016 case T_MNEM_asr:
10017 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10018 case T_MNEM_lsl:
10019 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10020 case T_MNEM_lsr:
10021 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10022 case T_MNEM_ror:
10023 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10024 default: abort ();
10025 }
10026
10027 if (THUMB_SETS_FLAGS (inst.instruction))
10028 narrow = (current_it_mask == 0);
10029 else
10030 narrow = (current_it_mask != 0);
10031 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10032 narrow = FALSE;
10033 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10034 narrow = FALSE;
10035 if (inst.operands[2].isreg
10036 && (inst.operands[1].reg != inst.operands[0].reg
10037 || inst.operands[2].reg > 7))
10038 narrow = FALSE;
10039 if (inst.size_req == 4)
10040 narrow = FALSE;
10041
10042 if (!narrow)
10043 {
10044 if (inst.operands[2].isreg)
10045 {
10046 inst.instruction = THUMB_OP32 (inst.instruction);
10047 inst.instruction |= inst.operands[0].reg << 8;
10048 inst.instruction |= inst.operands[1].reg << 16;
10049 inst.instruction |= inst.operands[2].reg;
10050 }
10051 else
10052 {
10053 inst.operands[1].shifted = 1;
10054 inst.operands[1].shift_kind = shift_kind;
10055 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10056 ? T_MNEM_movs : T_MNEM_mov);
10057 inst.instruction |= inst.operands[0].reg << 8;
10058 encode_thumb32_shifted_operand (1);
10059 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10060 inst.reloc.type = BFD_RELOC_UNUSED;
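/* E.g. a wide "lsls r0, r1, #3" is emitted as the MOVS.W
   "r0, r1, lsl #3" form, since the T32 immediate shifts are
   MOV (shifted register) encodings.  */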
10061 }
10062 }
10063 else
10064 {
10065 if (inst.operands[2].isreg)
10066 {
10067 switch (shift_kind)
10068 {
10069 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10070 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10071 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10072 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10073 default: abort ();
10074 }
10075
10076 inst.instruction |= inst.operands[0].reg;
10077 inst.instruction |= inst.operands[2].reg << 3;
10078 }
10079 else
10080 {
10081 switch (shift_kind)
10082 {
10083 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10084 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10085 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10086 default: abort ();
10087 }
10088 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10089 inst.instruction |= inst.operands[0].reg;
10090 inst.instruction |= inst.operands[1].reg << 3;
10091 }
10092 }
10093 }
10094 else
10095 {
10096 constraint (inst.operands[0].reg > 7
10097 || inst.operands[1].reg > 7, BAD_HIREG);
10098 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10099
10100 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10101 {
10102 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10103 constraint (inst.operands[0].reg != inst.operands[1].reg,
10104 _("source1 and dest must be same register"));
10105
10106 switch (inst.instruction)
10107 {
10108 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10109 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10110 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10111 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10112 default: abort ();
10113 }
10114
10115 inst.instruction |= inst.operands[0].reg;
10116 inst.instruction |= inst.operands[2].reg << 3;
10117 }
10118 else
10119 {
10120 switch (inst.instruction)
10121 {
10122 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10123 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10124 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10125 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10126 default: abort ();
10127 }
10128 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10129 inst.instruction |= inst.operands[0].reg;
10130 inst.instruction |= inst.operands[1].reg << 3;
10131 }
10132 }
10133 }
10134
10135 static void
10136 do_t_simd (void)
10137 {
10138 inst.instruction |= inst.operands[0].reg << 8;
10139 inst.instruction |= inst.operands[1].reg << 16;
10140 inst.instruction |= inst.operands[2].reg;
10141 }
10142
10143 static void
10144 do_t_smc (void)
10145 {
10146 unsigned int value = inst.reloc.exp.X_add_number;
10147 constraint (inst.reloc.exp.X_op != O_constant,
10148 _("expression too complex"));
10149 inst.reloc.type = BFD_RELOC_UNUSED;
10150 inst.instruction |= (value & 0xf000) >> 12;
10151 inst.instruction |= (value & 0x0ff0);
10152 inst.instruction |= (value & 0x000f) << 16;
10153 }
10154
10155 static void
10156 do_t_ssat (void)
10157 {
10158 inst.instruction |= inst.operands[0].reg << 8;
10159 inst.instruction |= inst.operands[1].imm - 1;
10160 inst.instruction |= inst.operands[2].reg << 16;
10161
10162 if (inst.operands[3].present)
10163 {
10164 constraint (inst.reloc.exp.X_op != O_constant,
10165 _("expression too complex"));
10166
10167 if (inst.reloc.exp.X_add_number != 0)
10168 {
10169 if (inst.operands[3].shift_kind == SHIFT_ASR)
10170 inst.instruction |= 0x00200000; /* sh bit */
10171 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10172 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10173 }
10174 inst.reloc.type = BFD_RELOC_UNUSED;
10175 }
10176 }
10177
10178 static void
10179 do_t_ssat16 (void)
10180 {
10181 inst.instruction |= inst.operands[0].reg << 8;
10182 inst.instruction |= inst.operands[1].imm - 1;
10183 inst.instruction |= inst.operands[2].reg << 16;
10184 }
10185
10186 static void
10187 do_t_strex (void)
10188 {
10189 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10190 || inst.operands[2].postind || inst.operands[2].writeback
10191 || inst.operands[2].immisreg || inst.operands[2].shifted
10192 || inst.operands[2].negative,
10193 BAD_ADDR_MODE);
10194
10195 inst.instruction |= inst.operands[0].reg << 8;
10196 inst.instruction |= inst.operands[1].reg << 12;
10197 inst.instruction |= inst.operands[2].reg << 16;
10198 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10199 }
10200
10201 static void
10202 do_t_strexd (void)
10203 {
10204 if (!inst.operands[2].present)
10205 inst.operands[2].reg = inst.operands[1].reg + 1;
10206
10207 constraint (inst.operands[0].reg == inst.operands[1].reg
10208 || inst.operands[0].reg == inst.operands[2].reg
10209 || inst.operands[0].reg == inst.operands[3].reg
10210 || inst.operands[1].reg == inst.operands[2].reg,
10211 BAD_OVERLAP);
10212
10213 inst.instruction |= inst.operands[0].reg;
10214 inst.instruction |= inst.operands[1].reg << 12;
10215 inst.instruction |= inst.operands[2].reg << 8;
10216 inst.instruction |= inst.operands[3].reg << 16;
10217 }
10218
10219 static void
10220 do_t_sxtah (void)
10221 {
10222 inst.instruction |= inst.operands[0].reg << 8;
10223 inst.instruction |= inst.operands[1].reg << 16;
10224 inst.instruction |= inst.operands[2].reg;
10225 inst.instruction |= inst.operands[3].imm << 4;
10226 }
10227
10228 static void
10229 do_t_sxth (void)
10230 {
10231 if (inst.instruction <= 0xffff && inst.size_req != 4
10232 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10233 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10234 {
10235 inst.instruction = THUMB_OP16 (inst.instruction);
10236 inst.instruction |= inst.operands[0].reg;
10237 inst.instruction |= inst.operands[1].reg << 3;
10238 }
10239 else if (unified_syntax)
10240 {
10241 if (inst.instruction <= 0xffff)
10242 inst.instruction = THUMB_OP32 (inst.instruction);
10243 inst.instruction |= inst.operands[0].reg << 8;
10244 inst.instruction |= inst.operands[1].reg;
10245 inst.instruction |= inst.operands[2].imm << 4;
10246 }
10247 else
10248 {
10249 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10250 _("Thumb encoding does not support rotation"));
10251 constraint (1, BAD_HIREG);
10252 }
10253 }
10254
10255 static void
10256 do_t_swi (void)
10257 {
10258 inst.reloc.type = BFD_RELOC_ARM_SWI;
10259 }
10260
10261 static void
10262 do_t_tb (void)
10263 {
10264 int half;
10265
10266 half = (inst.instruction & 0x10) != 0;
10267 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10268 constraint (inst.operands[0].immisreg,
10269 _("instruction requires register index"));
10270 constraint (inst.operands[0].imm == 15,
10271 _("PC is not a valid index register"));
10272 constraint (!half && inst.operands[0].shifted,
10273 _("instruction does not allow shifted index"));
10274 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10275 }
10276
10277 static void
10278 do_t_usat (void)
10279 {
10280 inst.instruction |= inst.operands[0].reg << 8;
10281 inst.instruction |= inst.operands[1].imm;
10282 inst.instruction |= inst.operands[2].reg << 16;
10283
10284 if (inst.operands[3].present)
10285 {
10286 constraint (inst.reloc.exp.X_op != O_constant,
10287 _("expression too complex"));
10288 if (inst.reloc.exp.X_add_number != 0)
10289 {
10290 if (inst.operands[3].shift_kind == SHIFT_ASR)
10291 inst.instruction |= 0x00200000; /* sh bit */
10292
10293 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10294 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10295 }
10296 inst.reloc.type = BFD_RELOC_UNUSED;
10297 }
10298 }
10299
10300 static void
10301 do_t_usat16 (void)
10302 {
10303 inst.instruction |= inst.operands[0].reg << 8;
10304 inst.instruction |= inst.operands[1].imm;
10305 inst.instruction |= inst.operands[2].reg << 16;
10306 }
10307
10308 /* Neon instruction encoder helpers. */
10309
10310 /* Encodings for the different types for various Neon opcodes. */
10311
10312 /* An "invalid" code for the following tables. */
10313 #define N_INV -1u
10314
10315 struct neon_tab_entry
10316 {
10317 unsigned integer;
10318 unsigned float_or_poly;
10319 unsigned scalar_or_imm;
10320 };
10321
10322 /* Map overloaded Neon opcodes to their respective encodings. */
10323 #define NEON_ENC_TAB \
10324 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10325 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10326 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10327 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10328 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10329 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10330 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10331 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10332 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10333 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10334 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10335 /* Register variants of the following two instructions are encoded as
10336 vcge / vcgt with the operands reversed. */ \
10337 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10338 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10339 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10340 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10341 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10342 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10343 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10344 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10345 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10346 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10347 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10348 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10349 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10350 X(vshl, 0x0000400, N_INV, 0x0800510), \
10351 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10352 X(vand, 0x0000110, N_INV, 0x0800030), \
10353 X(vbic, 0x0100110, N_INV, 0x0800030), \
10354 X(veor, 0x1000110, N_INV, N_INV), \
10355 X(vorn, 0x0300110, N_INV, 0x0800010), \
10356 X(vorr, 0x0200110, N_INV, 0x0800010), \
10357 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10358 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10359 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10360 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10361 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10362 X(vst1, 0x0000000, 0x0800000, N_INV), \
10363 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10364 X(vst2, 0x0000100, 0x0800100, N_INV), \
10365 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10366 X(vst3, 0x0000200, 0x0800200, N_INV), \
10367 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10368 X(vst4, 0x0000300, 0x0800300, N_INV), \
10369 X(vmovn, 0x1b20200, N_INV, N_INV), \
10370 X(vtrn, 0x1b20080, N_INV, N_INV), \
10371 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10372 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10373 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10374 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10375 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10376 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10377 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10378 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10379 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10380
10381 enum neon_opc
10382 {
10383 #define X(OPC,I,F,S) N_MNEM_##OPC
10384 NEON_ENC_TAB
10385 #undef X
10386 };
10387
10388 static const struct neon_tab_entry neon_enc_tab[] =
10389 {
10390 #define X(OPC,I,F,S) { (I), (F), (S) }
10391 NEON_ENC_TAB
10392 #undef X
10393 };
10394
10395 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10396 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10397 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10398 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10399 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10400 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10401 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10402 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10403 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10404 #define NEON_ENC_SINGLE(X) \
10405 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10406 #define NEON_ENC_DOUBLE(X) \
10407 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
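
/* Worked example (derived by hand from the table above): the entry
   X(vadd, 0x0000800, 0x0000d00, N_INV) means that
   NEON_ENC_INTEGER (N_MNEM_vadd) evaluates to 0x0000800 and
   NEON_ENC_FLOAT (N_MNEM_vadd) to 0x0000d00; the 0x0fffffff mask strips
   any condition code held in the top nibble of inst.instruction. */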
10408
10409 /* Define shapes for instruction operands. The following mnemonic characters
10410 are used in this table:
10411
10412 F - VFP S<n> register
10413 D - Neon D<n> register
10414 Q - Neon Q<n> register
10415 I - Immediate
10416 S - Scalar
10417 R - ARM register
10418 L - D<n> register list
10419
10420 This table is used to generate various data:
10421 - enumerations of the form NS_DDR to be used as arguments to
10422 neon_select_shape.
10423 - a table classifying shapes into single, double, quad, mixed.
10424 - a table used to drive neon_select_shape.
10425 */
10426
10427 #define NEON_SHAPE_DEF \
10428 X(3, (D, D, D), DOUBLE), \
10429 X(3, (Q, Q, Q), QUAD), \
10430 X(3, (D, D, I), DOUBLE), \
10431 X(3, (Q, Q, I), QUAD), \
10432 X(3, (D, D, S), DOUBLE), \
10433 X(3, (Q, Q, S), QUAD), \
10434 X(2, (D, D), DOUBLE), \
10435 X(2, (Q, Q), QUAD), \
10436 X(2, (D, S), DOUBLE), \
10437 X(2, (Q, S), QUAD), \
10438 X(2, (D, R), DOUBLE), \
10439 X(2, (Q, R), QUAD), \
10440 X(2, (D, I), DOUBLE), \
10441 X(2, (Q, I), QUAD), \
10442 X(3, (D, L, D), DOUBLE), \
10443 X(2, (D, Q), MIXED), \
10444 X(2, (Q, D), MIXED), \
10445 X(3, (D, Q, I), MIXED), \
10446 X(3, (Q, D, I), MIXED), \
10447 X(3, (Q, D, D), MIXED), \
10448 X(3, (D, Q, Q), MIXED), \
10449 X(3, (Q, Q, D), MIXED), \
10450 X(3, (Q, D, S), MIXED), \
10451 X(3, (D, Q, S), MIXED), \
10452 X(4, (D, D, D, I), DOUBLE), \
10453 X(4, (Q, Q, Q, I), QUAD), \
10454 X(2, (F, F), SINGLE), \
10455 X(3, (F, F, F), SINGLE), \
10456 X(2, (F, I), SINGLE), \
10457 X(2, (F, D), MIXED), \
10458 X(2, (D, F), MIXED), \
10459 X(3, (F, F, I), MIXED), \
10460 X(4, (R, R, F, F), SINGLE), \
10461 X(4, (F, F, R, R), SINGLE), \
10462 X(3, (D, R, R), DOUBLE), \
10463 X(3, (R, R, D), DOUBLE), \
10464 X(2, (S, R), SINGLE), \
10465 X(2, (R, S), SINGLE), \
10466 X(2, (F, R), SINGLE), \
10467 X(2, (R, F), SINGLE)
10468
10469 #define S2(A,B) NS_##A##B
10470 #define S3(A,B,C) NS_##A##B##C
10471 #define S4(A,B,C,D) NS_##A##B##C##D
10472
10473 #define X(N, L, C) S##N L
10474
10475 enum neon_shape
10476 {
10477 NEON_SHAPE_DEF,
10478 NS_NULL
10479 };
10480
10481 #undef X
10482 #undef S2
10483 #undef S3
10484 #undef S4
10485
10486 enum neon_shape_class
10487 {
10488 SC_SINGLE,
10489 SC_DOUBLE,
10490 SC_QUAD,
10491 SC_MIXED
10492 };
10493
10494 #define X(N, L, C) SC_##C
10495
10496 static enum neon_shape_class neon_shape_class[] =
10497 {
10498 NEON_SHAPE_DEF
10499 };
10500
10501 #undef X
10502
10503 enum neon_shape_el
10504 {
10505 SE_F,
10506 SE_D,
10507 SE_Q,
10508 SE_I,
10509 SE_S,
10510 SE_R,
10511 SE_L
10512 };
10513
10514 /* Register widths of above. */
10515 static unsigned neon_shape_el_size[] =
10516 {
10517 32,
10518 64,
10519 128,
10520 0,
10521 32,
10522 32,
10523 0
10524 };
10525
10526 struct neon_shape_info
10527 {
10528 unsigned els;
10529 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10530 };
10531
10532 #define S2(A,B) { SE_##A, SE_##B }
10533 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10534 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10535
10536 #define X(N, L, C) { N, S##N L }
10537
10538 static struct neon_shape_info neon_shape_tab[] =
10539 {
10540 NEON_SHAPE_DEF
10541 };
10542
10543 #undef X
10544 #undef S2
10545 #undef S3
10546 #undef S4
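
/* Worked example of the X-macro expansions above, for the first row of
   NEON_SHAPE_DEF: X(3, (D, D, D), DOUBLE) produces the enumerator NS_DDD,
   the entry SC_DOUBLE in neon_shape_class[], and the entry
   { 3, { SE_D, SE_D, SE_D } } in neon_shape_tab[]. */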
10547
10548 /* Bit masks used in type checking given instructions.
10549 'N_EQK' means the type must be the same as (or based on in some way) the key
10550 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10551 set, various other bits can be set as well in order to modify the meaning of
10552 the type constraint. */
10553
10554 enum neon_type_mask
10555 {
10556 N_S8 = 0x000001,
10557 N_S16 = 0x000002,
10558 N_S32 = 0x000004,
10559 N_S64 = 0x000008,
10560 N_U8 = 0x000010,
10561 N_U16 = 0x000020,
10562 N_U32 = 0x000040,
10563 N_U64 = 0x000080,
10564 N_I8 = 0x000100,
10565 N_I16 = 0x000200,
10566 N_I32 = 0x000400,
10567 N_I64 = 0x000800,
10568 N_8 = 0x001000,
10569 N_16 = 0x002000,
10570 N_32 = 0x004000,
10571 N_64 = 0x008000,
10572 N_P8 = 0x010000,
10573 N_P16 = 0x020000,
10574 N_F32 = 0x040000,
10575 N_F64 = 0x080000,
10576 N_KEY = 0x100000, /* key element (main type specifier). */
10577 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10578 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10579 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10580 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10581 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10582 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10583 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10584 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10585 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10586 N_UTYP = 0,
10587 N_MAX_NONSPECIAL = N_F64
10588 };
10589
10590 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10591
10592 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10593 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10594 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10595 #define N_SUF_32 (N_SU_32 | N_F32)
10596 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10597 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
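
/* For example, N_SU_32 covers the signed and unsigned 8/16/32-bit types and
   N_SUF_32 additionally admits F32, so a constraint such as N_SUF_32 | N_KEY
   lets the key operand be written as any of .s8 .s16 .s32 .u8 .u16 .u32 .f32
   (illustrative reading only; the full rules are in neon_check_type below). */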
10598
10599 /* Pass this as the first type argument to neon_check_type to ignore types
10600 altogether. */
10601 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10602
10603 /* Select a "shape" for the current instruction (describing register types or
10604 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10605 doesn't fit. For non-polymorphic shapes, checking is usually done as part
10606 of operand parsing, so this function doesn't need to be called.
10607 Shapes should be listed in order of decreasing length. */
10608
10609 static enum neon_shape
10610 neon_select_shape (enum neon_shape shape, ...)
10611 {
10612 va_list ap;
10613 enum neon_shape first_shape = shape;
10614
10615 /* Fix missing optional operands. FIXME: we don't know at this point how
10616 many arguments we should have, so this makes the assumption that we have
10617 > 1. This is true of all current Neon opcodes, I think, but may not be
10618 true in the future. */
10619 if (!inst.operands[1].present)
10620 inst.operands[1] = inst.operands[0];
10621
10622 va_start (ap, shape);
10623
10624 for (; shape != NS_NULL; shape = va_arg (ap, int))
10625 {
10626 unsigned j;
10627 int matches = 1;
10628
10629 for (j = 0; j < neon_shape_tab[shape].els; j++)
10630 {
10631 if (!inst.operands[j].present)
10632 {
10633 matches = 0;
10634 break;
10635 }
10636
10637 switch (neon_shape_tab[shape].el[j])
10638 {
10639 case SE_F:
10640 if (!(inst.operands[j].isreg
10641 && inst.operands[j].isvec
10642 && inst.operands[j].issingle
10643 && !inst.operands[j].isquad))
10644 matches = 0;
10645 break;
10646
10647 case SE_D:
10648 if (!(inst.operands[j].isreg
10649 && inst.operands[j].isvec
10650 && !inst.operands[j].isquad
10651 && !inst.operands[j].issingle))
10652 matches = 0;
10653 break;
10654
10655 case SE_R:
10656 if (!(inst.operands[j].isreg
10657 && !inst.operands[j].isvec))
10658 matches = 0;
10659 break;
10660
10661 case SE_Q:
10662 if (!(inst.operands[j].isreg
10663 && inst.operands[j].isvec
10664 && inst.operands[j].isquad
10665 && !inst.operands[j].issingle))
10666 matches = 0;
10667 break;
10668
10669 case SE_I:
10670 if (!(!inst.operands[j].isreg
10671 && !inst.operands[j].isscalar))
10672 matches = 0;
10673 break;
10674
10675 case SE_S:
10676 if (!(!inst.operands[j].isreg
10677 && inst.operands[j].isscalar))
10678 matches = 0;
10679 break;
10680
10681 case SE_L:
10682 break;
10683 }
10684 }
10685 if (matches)
10686 break;
10687 }
10688
10689 va_end (ap);
10690
10691 if (shape == NS_NULL && first_shape != NS_NULL)
10692 first_error (_("invalid instruction shape"));
10693
10694 return shape;
10695 }
10696
10697 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10698 means the Q bit should be set). */
10699
10700 static int
10701 neon_quad (enum neon_shape shape)
10702 {
10703 return neon_shape_class[shape] == SC_QUAD;
10704 }
10705
10706 static void
10707 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10708 unsigned *g_size)
10709 {
10710 /* Allow a type which is constrained to be based on the key element to be
10711 modified, according to the bits set alongside N_EQK. */
10712 if ((typebits & N_EQK) != 0)
10713 {
10714 if ((typebits & N_HLF) != 0)
10715 *g_size /= 2;
10716 else if ((typebits & N_DBL) != 0)
10717 *g_size *= 2;
10718 if ((typebits & N_SGN) != 0)
10719 *g_type = NT_signed;
10720 else if ((typebits & N_UNS) != 0)
10721 *g_type = NT_unsigned;
10722 else if ((typebits & N_INT) != 0)
10723 *g_type = NT_integer;
10724 else if ((typebits & N_FLT) != 0)
10725 *g_type = NT_float;
10726 else if ((typebits & N_SIZ) != 0)
10727 *g_type = NT_untyped;
10728 }
10729 }
10730
10731 /* Return a copy of the key type promoted by the bits set in THISARG. KEY should be the "key"
10732 operand type, i.e. the single type specified in a Neon instruction when it
10733 is the only one given. */
10734
10735 static struct neon_type_el
10736 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10737 {
10738 struct neon_type_el dest = *key;
10739
10740 assert ((thisarg & N_EQK) != 0);
10741
10742 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10743
10744 return dest;
10745 }
10746
10747 /* Convert Neon type and size into compact bitmask representation. */
10748
10749 static enum neon_type_mask
10750 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10751 {
10752 switch (type)
10753 {
10754 case NT_untyped:
10755 switch (size)
10756 {
10757 case 8: return N_8;
10758 case 16: return N_16;
10759 case 32: return N_32;
10760 case 64: return N_64;
10761 default: ;
10762 }
10763 break;
10764
10765 case NT_integer:
10766 switch (size)
10767 {
10768 case 8: return N_I8;
10769 case 16: return N_I16;
10770 case 32: return N_I32;
10771 case 64: return N_I64;
10772 default: ;
10773 }
10774 break;
10775
10776 case NT_float:
10777 switch (size)
10778 {
10779 case 32: return N_F32;
10780 case 64: return N_F64;
10781 default: ;
10782 }
10783 break;
10784
10785 case NT_poly:
10786 switch (size)
10787 {
10788 case 8: return N_P8;
10789 case 16: return N_P16;
10790 default: ;
10791 }
10792 break;
10793
10794 case NT_signed:
10795 switch (size)
10796 {
10797 case 8: return N_S8;
10798 case 16: return N_S16;
10799 case 32: return N_S32;
10800 case 64: return N_S64;
10801 default: ;
10802 }
10803 break;
10804
10805 case NT_unsigned:
10806 switch (size)
10807 {
10808 case 8: return N_U8;
10809 case 16: return N_U16;
10810 case 32: return N_U32;
10811 case 64: return N_U64;
10812 default: ;
10813 }
10814 break;
10815
10816 default: ;
10817 }
10818
10819 return N_UTYP;
10820 }
10821
10822 /* Convert compact Neon bitmask type representation to a type and size. Only
10823 handles the case where a single bit is set in the mask. */
10824
10825 static int
10826 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10827 enum neon_type_mask mask)
10828 {
10829 if ((mask & N_EQK) != 0)
10830 return FAIL;
10831
10832 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10833 *size = 8;
10834 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10835 *size = 16;
10836 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10837 *size = 32;
10838 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10839 *size = 64;
10840 else
10841 return FAIL;
10842
10843 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10844 *type = NT_signed;
10845 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10846 *type = NT_unsigned;
10847 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10848 *type = NT_integer;
10849 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10850 *type = NT_untyped;
10851 else if ((mask & (N_P8 | N_P16)) != 0)
10852 *type = NT_poly;
10853 else if ((mask & (N_F32 | N_F64)) != 0)
10854 *type = NT_float;
10855 else
10856 return FAIL;
10857
10858 return SUCCESS;
10859 }
10860
10861 /* Modify a bitmask of allowed types. This is only needed for type
10862 relaxation. */
10863
10864 static unsigned
10865 modify_types_allowed (unsigned allowed, unsigned mods)
10866 {
10867 unsigned size;
10868 enum neon_el_type type;
10869 unsigned destmask;
10870 int i;
10871
10872 destmask = 0;
10873
10874 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10875 {
10876 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10877 {
10878 neon_modify_type_size (mods, &type, &size);
10879 destmask |= type_chk_of_el_type (type, size);
10880 }
10881 }
10882
10883 return destmask;
10884 }
10885
10886 /* Check type and return type classification.
10887 The manual states (paraphrase): If one datatype is given, it indicates the
10888 type given in:
10889 - the second operand, if there is one
10890 - the operand, if there is no second operand
10891 - the result, if there are no operands.
10892 This isn't quite good enough though, so we use a concept of a "key" datatype
10893 set on a per-instruction basis, which is the one that matters when
10894 only one data type is written.
10895 Note: this function has side-effects (e.g. filling in missing operands). All
10896 Neon instructions should call it before performing bit encoding. */
10897
10898 static struct neon_type_el
10899 neon_check_type (unsigned els, enum neon_shape ns, ...)
10900 {
10901 va_list ap;
10902 unsigned i, pass, key_el = 0;
10903 unsigned types[NEON_MAX_TYPE_ELS];
10904 enum neon_el_type k_type = NT_invtype;
10905 unsigned k_size = -1u;
10906 struct neon_type_el badtype = {NT_invtype, -1};
10907 unsigned key_allowed = 0;
10908
10909 /* The optional register in a Neon instruction is always operand 1, so that is
10910 the slot left empty when it is omitted. Fill in the missing operand here. */
10911 if (els > 1 && !inst.operands[1].present)
10912 inst.operands[1] = inst.operands[0];
10913
10914 /* Suck up all the varargs. */
10915 va_start (ap, ns);
10916 for (i = 0; i < els; i++)
10917 {
10918 unsigned thisarg = va_arg (ap, unsigned);
10919 if (thisarg == N_IGNORE_TYPE)
10920 {
10921 va_end (ap);
10922 return badtype;
10923 }
10924 types[i] = thisarg;
10925 if ((thisarg & N_KEY) != 0)
10926 key_el = i;
10927 }
10928 va_end (ap);
10929
10930 if (inst.vectype.elems > 0)
10931 for (i = 0; i < els; i++)
10932 if (inst.operands[i].vectype.type != NT_invtype)
10933 {
10934 first_error (_("types specified in both the mnemonic and operands"));
10935 return badtype;
10936 }
10937
10938 /* Duplicate inst.vectype elements here as necessary.
10939 FIXME: No idea if this is exactly the same as the ARM assembler,
10940 particularly when an insn takes one register and one non-register
10941 operand. */
10942 if (inst.vectype.elems == 1 && els > 1)
10943 {
10944 unsigned j;
10945 inst.vectype.elems = els;
10946 inst.vectype.el[key_el] = inst.vectype.el[0];
10947 for (j = 0; j < els; j++)
10948 if (j != key_el)
10949 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10950 types[j]);
10951 }
10952 else if (inst.vectype.elems == 0 && els > 0)
10953 {
10954 unsigned j;
10955 /* No types were given after the mnemonic, so look for types specified
10956 after each operand. We allow some flexibility here; as long as the
10957 "key" operand has a type, we can infer the others. */
10958 for (j = 0; j < els; j++)
10959 if (inst.operands[j].vectype.type != NT_invtype)
10960 inst.vectype.el[j] = inst.operands[j].vectype;
10961
10962 if (inst.operands[key_el].vectype.type != NT_invtype)
10963 {
10964 for (j = 0; j < els; j++)
10965 if (inst.operands[j].vectype.type == NT_invtype)
10966 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10967 types[j]);
10968 }
10969 else
10970 {
10971 first_error (_("operand types can't be inferred"));
10972 return badtype;
10973 }
10974 }
10975 else if (inst.vectype.elems != els)
10976 {
10977 first_error (_("type specifier has the wrong number of parts"));
10978 return badtype;
10979 }
10980
10981 for (pass = 0; pass < 2; pass++)
10982 {
10983 for (i = 0; i < els; i++)
10984 {
10985 unsigned thisarg = types[i];
10986 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10987 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10988 enum neon_el_type g_type = inst.vectype.el[i].type;
10989 unsigned g_size = inst.vectype.el[i].size;
10990
10991 /* Decay more-specific signed & unsigned types to sign-insensitive
10992 integer types if sign-specific variants are unavailable. */
10993 if ((g_type == NT_signed || g_type == NT_unsigned)
10994 && (types_allowed & N_SU_ALL) == 0)
10995 g_type = NT_integer;
10996
10997 /* If only untyped args are allowed, decay any more specific types to
10998 them. Some instructions only care about signs for some element
10999 sizes, so handle that properly. */
11000 if ((g_size == 8 && (types_allowed & N_8) != 0)
11001 || (g_size == 16 && (types_allowed & N_16) != 0)
11002 || (g_size == 32 && (types_allowed & N_32) != 0)
11003 || (g_size == 64 && (types_allowed & N_64) != 0))
11004 g_type = NT_untyped;
11005
11006 if (pass == 0)
11007 {
11008 if ((thisarg & N_KEY) != 0)
11009 {
11010 k_type = g_type;
11011 k_size = g_size;
11012 key_allowed = thisarg & ~N_KEY;
11013 }
11014 }
11015 else
11016 {
11017 if ((thisarg & N_VFP) != 0)
11018 {
11019 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11020 unsigned regwidth = neon_shape_el_size[regshape], match;
11021
11022 /* In VFP mode, operands must match register widths. If we
11023 have a key operand, use its width, else use the width of
11024 the current operand. */
11025 if (k_size != -1u)
11026 match = k_size;
11027 else
11028 match = g_size;
11029
11030 if (regwidth != match)
11031 {
11032 first_error (_("operand size must match register width"));
11033 return badtype;
11034 }
11035 }
11036
11037 if ((thisarg & N_EQK) == 0)
11038 {
11039 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11040
11041 if ((given_type & types_allowed) == 0)
11042 {
11043 first_error (_("bad type in Neon instruction"));
11044 return badtype;
11045 }
11046 }
11047 else
11048 {
11049 enum neon_el_type mod_k_type = k_type;
11050 unsigned mod_k_size = k_size;
11051 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11052 if (g_type != mod_k_type || g_size != mod_k_size)
11053 {
11054 first_error (_("inconsistent types in Neon instruction"));
11055 return badtype;
11056 }
11057 }
11058 }
11059 }
11060 }
11061
11062 return inst.vectype.el[key_el];
11063 }
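
/* Illustrative use of the above (see e.g. do_neon_dyadic_i_su below):

     struct neon_type_el et = neon_check_type (3, rs,
       N_EQK, N_EQK, N_SU_32 | N_KEY);

   Here the third operand carries the key type, the first two must have the
   same type and size as the key, and ET comes back as the key type, e.g.
   {NT_unsigned, 16} for a .u16 instruction. */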
11064
11065 /* Neon-style VFP instruction forwarding. */
11066
11067 /* Thumb VFP instructions have 0xE in the condition field. */
11068
11069 static void
11070 do_vfp_cond_or_thumb (void)
11071 {
11072 if (thumb_mode)
11073 inst.instruction |= 0xe0000000;
11074 else
11075 inst.instruction |= inst.cond << 28;
11076 }
11077
11078 /* Look up and encode a simple mnemonic, for use as a helper function for the
11079 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11080 etc. It is assumed that operand parsing has already been done, and that the
11081 operands are in the form expected by the given opcode (this isn't necessarily
11082 the same as the form in which they were parsed, hence some massaging must
11083 take place before this function is called).
11084 Checks current arch version against that in the looked-up opcode. */
11085
11086 static void
11087 do_vfp_nsyn_opcode (const char *opname)
11088 {
11089 const struct asm_opcode *opcode;
11090
11091 opcode = hash_find (arm_ops_hsh, opname);
11092
11093 if (!opcode)
11094 abort ();
11095
11096 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11097 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11098 _(BAD_FPU));
11099
11100 if (thumb_mode)
11101 {
11102 inst.instruction = opcode->tvalue;
11103 opcode->tencode ();
11104 }
11105 else
11106 {
11107 inst.instruction = (inst.cond << 28) | opcode->avalue;
11108 opcode->aencode ();
11109 }
11110 }
11111
11112 static void
11113 do_vfp_nsyn_add_sub (enum neon_shape rs)
11114 {
11115 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11116
11117 if (rs == NS_FFF)
11118 {
11119 if (is_add)
11120 do_vfp_nsyn_opcode ("fadds");
11121 else
11122 do_vfp_nsyn_opcode ("fsubs");
11123 }
11124 else
11125 {
11126 if (is_add)
11127 do_vfp_nsyn_opcode ("faddd");
11128 else
11129 do_vfp_nsyn_opcode ("fsubd");
11130 }
11131 }
11132
11133 /* Check operand types to see if this is a VFP instruction, and if so call
11134 PFN (). */
11135
11136 static int
11137 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11138 {
11139 enum neon_shape rs;
11140 struct neon_type_el et;
11141
11142 switch (args)
11143 {
11144 case 2:
11145 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11146 et = neon_check_type (2, rs,
11147 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11148 break;
11149
11150 case 3:
11151 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11152 et = neon_check_type (3, rs,
11153 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11154 break;
11155
11156 default:
11157 abort ();
11158 }
11159
11160 if (et.type != NT_invtype)
11161 {
11162 pfn (rs);
11163 return SUCCESS;
11164 }
11165 else
11166 inst.error = NULL;
11167
11168 return FAIL;
11169 }
11170
11171 static void
11172 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11173 {
11174 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11175
11176 if (rs == NS_FFF)
11177 {
11178 if (is_mla)
11179 do_vfp_nsyn_opcode ("fmacs");
11180 else
11181 do_vfp_nsyn_opcode ("fmscs");
11182 }
11183 else
11184 {
11185 if (is_mla)
11186 do_vfp_nsyn_opcode ("fmacd");
11187 else
11188 do_vfp_nsyn_opcode ("fmscd");
11189 }
11190 }
11191
11192 static void
11193 do_vfp_nsyn_mul (enum neon_shape rs)
11194 {
11195 if (rs == NS_FFF)
11196 do_vfp_nsyn_opcode ("fmuls");
11197 else
11198 do_vfp_nsyn_opcode ("fmuld");
11199 }
11200
11201 static void
11202 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11203 {
11204 int is_neg = (inst.instruction & 0x80) != 0;
11205 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11206
11207 if (rs == NS_FF)
11208 {
11209 if (is_neg)
11210 do_vfp_nsyn_opcode ("fnegs");
11211 else
11212 do_vfp_nsyn_opcode ("fabss");
11213 }
11214 else
11215 {
11216 if (is_neg)
11217 do_vfp_nsyn_opcode ("fnegd");
11218 else
11219 do_vfp_nsyn_opcode ("fabsd");
11220 }
11221 }
11222
11223 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11224 insns belong to Neon, and are handled elsewhere. */
11225
11226 static void
11227 do_vfp_nsyn_ldm_stm (int is_dbmode)
11228 {
11229 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11230 if (is_ldm)
11231 {
11232 if (is_dbmode)
11233 do_vfp_nsyn_opcode ("fldmdbs");
11234 else
11235 do_vfp_nsyn_opcode ("fldmias");
11236 }
11237 else
11238 {
11239 if (is_dbmode)
11240 do_vfp_nsyn_opcode ("fstmdbs");
11241 else
11242 do_vfp_nsyn_opcode ("fstmias");
11243 }
11244 }
11245
11246 static void
11247 do_vfp_nsyn_sqrt (void)
11248 {
11249 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11250 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11251
11252 if (rs == NS_FF)
11253 do_vfp_nsyn_opcode ("fsqrts");
11254 else
11255 do_vfp_nsyn_opcode ("fsqrtd");
11256 }
11257
11258 static void
11259 do_vfp_nsyn_div (void)
11260 {
11261 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11262 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11263 N_F32 | N_F64 | N_KEY | N_VFP);
11264
11265 if (rs == NS_FFF)
11266 do_vfp_nsyn_opcode ("fdivs");
11267 else
11268 do_vfp_nsyn_opcode ("fdivd");
11269 }
11270
11271 static void
11272 do_vfp_nsyn_nmul (void)
11273 {
11274 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11275 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11276 N_F32 | N_F64 | N_KEY | N_VFP);
11277
11278 if (rs == NS_FFF)
11279 {
11280 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11281 do_vfp_sp_dyadic ();
11282 }
11283 else
11284 {
11285 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11286 do_vfp_dp_rd_rn_rm ();
11287 }
11288 do_vfp_cond_or_thumb ();
11289 }
11290
11291 static void
11292 do_vfp_nsyn_cmp (void)
11293 {
11294 if (inst.operands[1].isreg)
11295 {
11296 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11297 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11298
11299 if (rs == NS_FF)
11300 {
11301 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11302 do_vfp_sp_monadic ();
11303 }
11304 else
11305 {
11306 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11307 do_vfp_dp_rd_rm ();
11308 }
11309 }
11310 else
11311 {
11312 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11313 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11314
11315 switch (inst.instruction & 0x0fffffff)
11316 {
11317 case N_MNEM_vcmp:
11318 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11319 break;
11320 case N_MNEM_vcmpe:
11321 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11322 break;
11323 default:
11324 abort ();
11325 }
11326
11327 if (rs == NS_FI)
11328 {
11329 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11330 do_vfp_sp_compare_z ();
11331 }
11332 else
11333 {
11334 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11335 do_vfp_dp_rd ();
11336 }
11337 }
11338 do_vfp_cond_or_thumb ();
11339 }
11340
11341 static void
11342 nsyn_insert_sp (void)
11343 {
11344 inst.operands[1] = inst.operands[0];
11345 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11346 inst.operands[0].reg = 13;
11347 inst.operands[0].isreg = 1;
11348 inst.operands[0].writeback = 1;
11349 inst.operands[0].present = 1;
11350 }
11351
11352 static void
11353 do_vfp_nsyn_push (void)
11354 {
11355 nsyn_insert_sp ();
11356 if (inst.operands[1].issingle)
11357 do_vfp_nsyn_opcode ("fstmdbs");
11358 else
11359 do_vfp_nsyn_opcode ("fstmdbd");
11360 }
11361
11362 static void
11363 do_vfp_nsyn_pop (void)
11364 {
11365 nsyn_insert_sp ();
11366 if (inst.operands[1].issingle)
11367 do_vfp_nsyn_opcode ("fldmias");
11368 else
11369 do_vfp_nsyn_opcode ("fldmiad");
11370 }
11371
11372 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11373 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11374
11375 static unsigned
11376 neon_dp_fixup (unsigned i)
11377 {
11378 if (thumb_mode)
11379 {
11380 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11381 if (i & (1 << 24))
11382 i |= 1 << 28;
11383
11384 i &= ~(1 << 24);
11385
11386 i |= 0xef000000;
11387 }
11388 else
11389 i |= 0xf2000000;
11390
11391 return i;
11392 }
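
/* Worked examples of the fixup (values taken from the encoding table above):
   the integer VADD base 0x0000800 has bit 24 clear, so it becomes 0xf2000800
   in ARM mode and 0xef000800 in Thumb mode; the integer VSUB base 0x1000800
   has bit 24 set, so it becomes 0xf3000800 in ARM mode and, with that bit
   moved to bit 28, 0xff000800 in Thumb mode. */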
11393
11394 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11395 (0, 1, 2, 3). */
11396
11397 static unsigned
11398 neon_logbits (unsigned x)
11399 {
11400 return ffs (x) - 4;
11401 }
11402
11403 #define LOW4(R) ((R) & 0xf)
11404 #define HI1(R) (((R) >> 4) & 1)
11405
11406 /* Encode insns with bit pattern:
11407
11408 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11409 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11410
11411 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11412 different meaning for some instruction. */
11413
11414 static void
11415 neon_three_same (int isquad, int ubit, int size)
11416 {
11417 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11418 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11419 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11420 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11421 inst.instruction |= LOW4 (inst.operands[2].reg);
11422 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11423 inst.instruction |= (isquad != 0) << 6;
11424 inst.instruction |= (ubit != 0) << 24;
11425 if (size != -1)
11426 inst.instruction |= neon_logbits (size) << 20;
11427
11428 inst.instruction = neon_dp_fixup (inst.instruction);
11429 }
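
/* Worked example (by hand, for illustration): starting from the integer VADD
   base 0x0000800 with operands d0, d1, d2 and ISQUAD == 0, UBIT == 0,
   SIZE == 32, the fields above give 0x0210802, which neon_dp_fixup turns
   into 0xf2210802 in ARM mode. */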
11430
11431 /* Encode instructions of the form:
11432
11433 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11434 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11435
11436 Don't write size if SIZE == -1. */
11437
11438 static void
11439 neon_two_same (int qbit, int ubit, int size)
11440 {
11441 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11442 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11443 inst.instruction |= LOW4 (inst.operands[1].reg);
11444 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11445 inst.instruction |= (qbit != 0) << 6;
11446 inst.instruction |= (ubit != 0) << 24;
11447
11448 if (size != -1)
11449 inst.instruction |= neon_logbits (size) << 18;
11450
11451 inst.instruction = neon_dp_fixup (inst.instruction);
11452 }
11453
11454 /* Neon instruction encoders, in approximate order of appearance. */
11455
11456 static void
11457 do_neon_dyadic_i_su (void)
11458 {
11459 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11460 struct neon_type_el et = neon_check_type (3, rs,
11461 N_EQK, N_EQK, N_SU_32 | N_KEY);
11462 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11463 }
11464
11465 static void
11466 do_neon_dyadic_i64_su (void)
11467 {
11468 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11469 struct neon_type_el et = neon_check_type (3, rs,
11470 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11471 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11472 }
11473
11474 static void
11475 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11476 unsigned immbits)
11477 {
11478 unsigned size = et.size >> 3;
11479 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11480 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11481 inst.instruction |= LOW4 (inst.operands[1].reg);
11482 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11483 inst.instruction |= (isquad != 0) << 6;
11484 inst.instruction |= immbits << 16;
11485 inst.instruction |= (size >> 3) << 7;
11486 inst.instruction |= (size & 0x7) << 19;
11487 if (write_ubit)
11488 inst.instruction |= (uval != 0) << 24;
11489
11490 inst.instruction = neon_dp_fixup (inst.instruction);
11491 }
11492
11493 static void
11494 do_neon_shl_imm (void)
11495 {
11496 if (!inst.operands[2].isreg)
11497 {
11498 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11499 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11500 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11501 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11502 }
11503 else
11504 {
11505 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11506 struct neon_type_el et = neon_check_type (3, rs,
11507 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11508 unsigned int tmp;
11509
11510 /* VSHL/VQSHL 3-register variants have syntax such as:
11511 vshl.xx Dd, Dm, Dn
11512 whereas other 3-register operations encoded by neon_three_same have
11513 syntax like:
11514 vadd.xx Dd, Dn, Dm
11515 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11516 here. */
11517 tmp = inst.operands[2].reg;
11518 inst.operands[2].reg = inst.operands[1].reg;
11519 inst.operands[1].reg = tmp;
11520 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11521 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11522 }
11523 }
11524
11525 static void
11526 do_neon_qshl_imm (void)
11527 {
11528 if (!inst.operands[2].isreg)
11529 {
11530 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11531 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11532
11533 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11534 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11535 inst.operands[2].imm);
11536 }
11537 else
11538 {
11539 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11540 struct neon_type_el et = neon_check_type (3, rs,
11541 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11542 unsigned int tmp;
11543
11544 /* See note in do_neon_shl_imm. */
11545 tmp = inst.operands[2].reg;
11546 inst.operands[2].reg = inst.operands[1].reg;
11547 inst.operands[1].reg = tmp;
11548 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11549 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11550 }
11551 }
11552
11553 static void
11554 do_neon_rshl (void)
11555 {
11556 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11557 struct neon_type_el et = neon_check_type (3, rs,
11558 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11559 unsigned int tmp;
11560
11561 tmp = inst.operands[2].reg;
11562 inst.operands[2].reg = inst.operands[1].reg;
11563 inst.operands[1].reg = tmp;
11564 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11565 }
11566
11567 static int
11568 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11569 {
11570 /* Handle .I8 pseudo-instructions. */
11571 if (size == 8)
11572 {
11573 /* Unfortunately, this will make everything apart from zero out-of-range.
11574 FIXME: is this the intended semantics? There doesn't seem much point in
11575 accepting .I8 if so. */
11576 immediate |= immediate << 8;
11577 size = 16;
11578 }
11579
11580 if (size >= 32)
11581 {
11582 if (immediate == (immediate & 0x000000ff))
11583 {
11584 *immbits = immediate;
11585 return 0x1;
11586 }
11587 else if (immediate == (immediate & 0x0000ff00))
11588 {
11589 *immbits = immediate >> 8;
11590 return 0x3;
11591 }
11592 else if (immediate == (immediate & 0x00ff0000))
11593 {
11594 *immbits = immediate >> 16;
11595 return 0x5;
11596 }
11597 else if (immediate == (immediate & 0xff000000))
11598 {
11599 *immbits = immediate >> 24;
11600 return 0x7;
11601 }
11602 if ((immediate & 0xffff) != (immediate >> 16))
11603 goto bad_immediate;
11604 immediate &= 0xffff;
11605 }
11606
11607 if (immediate == (immediate & 0x000000ff))
11608 {
11609 *immbits = immediate;
11610 return 0x9;
11611 }
11612 else if (immediate == (immediate & 0x0000ff00))
11613 {
11614 *immbits = immediate >> 8;
11615 return 0xb;
11616 }
11617
11618 bad_immediate:
11619 first_error (_("immediate value out of range"));
11620 return FAIL;
11621 }
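
/* Illustrative results of the above (hand-checked): with SIZE == 32,
   IMMEDIATE == 0x00ff0000 returns cmode 0x5 and sets *IMMBITS to 0xff;
   IMMEDIATE == 0x00ab00ab is folded to the repeating 16-bit value 0x00ab
   and returns cmode 0x9 with *IMMBITS == 0xab. */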
11622
11623 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11624 A, B, C, D. */
11625
11626 static int
11627 neon_bits_same_in_bytes (unsigned imm)
11628 {
11629 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11630 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11631 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11632 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11633 }
11634
11635 /* For immediate of above form, return 0bABCD. */
11636
11637 static unsigned
11638 neon_squash_bits (unsigned imm)
11639 {
11640 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11641 | ((imm & 0x01000000) >> 21);
11642 }
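
/* For example, 0x00ff00ff satisfies neon_bits_same_in_bytes, and
   neon_squash_bits (0x00ff00ff) == 0x5 (0b0101: one result bit per source
   byte, with bit 3 taken from the most significant byte). */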
11643
11644 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11645
11646 static unsigned
11647 neon_qfloat_bits (unsigned imm)
11648 {
11649 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11650 }
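
/* For example, the single-precision constant 1.0f (0x3f800000) compresses
   to 0x70, and -1.0f (0xbf800000) to 0xf0 (hand-checked against the
   extraction above). */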
11651
11652 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11653 the instruction. *OP is passed as the initial value of the op field, and
11654 may be set to a different value depending on the constant (i.e.
11655 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11656 MVN). If the immediate looks like a repeated pattern then also
11657 try smaller element sizes. */
11658
11659 static int
11660 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
11661 unsigned *immbits, int *op, int size,
11662 enum neon_el_type type)
11663 {
11664 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11665 float. */
11666 if (type == NT_float && !float_p)
11667 return FAIL;
11668
11669 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11670 {
11671 if (size != 32 || *op == 1)
11672 return FAIL;
11673 *immbits = neon_qfloat_bits (immlo);
11674 return 0xf;
11675 }
11676
11677 if (size == 64)
11678 {
11679 if (neon_bits_same_in_bytes (immhi)
11680 && neon_bits_same_in_bytes (immlo))
11681 {
11682 if (*op == 1)
11683 return FAIL;
11684 *immbits = (neon_squash_bits (immhi) << 4)
11685 | neon_squash_bits (immlo);
11686 *op = 1;
11687 return 0xe;
11688 }
11689
11690 if (immhi != immlo)
11691 return FAIL;
11692 }
11693
11694 if (size >= 32)
11695 {
11696 if (immlo == (immlo & 0x000000ff))
11697 {
11698 *immbits = immlo;
11699 return 0x0;
11700 }
11701 else if (immlo == (immlo & 0x0000ff00))
11702 {
11703 *immbits = immlo >> 8;
11704 return 0x2;
11705 }
11706 else if (immlo == (immlo & 0x00ff0000))
11707 {
11708 *immbits = immlo >> 16;
11709 return 0x4;
11710 }
11711 else if (immlo == (immlo & 0xff000000))
11712 {
11713 *immbits = immlo >> 24;
11714 return 0x6;
11715 }
11716 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11717 {
11718 *immbits = (immlo >> 8) & 0xff;
11719 return 0xc;
11720 }
11721 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11722 {
11723 *immbits = (immlo >> 16) & 0xff;
11724 return 0xd;
11725 }
11726
11727 if ((immlo & 0xffff) != (immlo >> 16))
11728 return FAIL;
11729 immlo &= 0xffff;
11730 }
11731
11732 if (size >= 16)
11733 {
11734 if (immlo == (immlo & 0x000000ff))
11735 {
11736 *immbits = immlo;
11737 return 0x8;
11738 }
11739 else if (immlo == (immlo & 0x0000ff00))
11740 {
11741 *immbits = immlo >> 8;
11742 return 0xa;
11743 }
11744
11745 if ((immlo & 0xff) != (immlo >> 8))
11746 return FAIL;
11747 immlo &= 0xff;
11748 }
11749
11750 if (immlo == (immlo & 0x000000ff))
11751 {
11752 /* Don't allow MVN with 8-bit immediate. */
11753 if (*op == 1)
11754 return FAIL;
11755 *immbits = immlo;
11756 return 0xe;
11757 }
11758
11759 return FAIL;
11760 }
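
/* Illustrative results (hand-checked against the cases above), taking an
   integer element type and *OP == 0 on entry: with SIZE == 32,
   IMMLO == 0x0000ab00 returns cmode 0x2 and sets *IMMBITS to 0xab; with
   SIZE == 64 and IMMHI == IMMLO == 0x00ff00ff, the byte-mask case applies,
   *IMMBITS becomes 0x55, *OP is set to 1 and the return value is 0xe. */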
11761
11762 /* Write immediate bits [7:0] to the following locations:
11763
11764 |28/24|23 19|18 16|15 4|3 0|
11765 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11766
11767 This function is used by VMOV/VMVN/VORR/VBIC. */
11768
11769 static void
11770 neon_write_immbits (unsigned immbits)
11771 {
11772 inst.instruction |= immbits & 0xf;
11773 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11774 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11775 }
11776
11777 /* Invert low-order SIZE bits of XHI:XLO. */
11778
11779 static void
11780 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11781 {
11782 unsigned immlo = xlo ? *xlo : 0;
11783 unsigned immhi = xhi ? *xhi : 0;
11784
11785 switch (size)
11786 {
11787 case 8:
11788 immlo = (~immlo) & 0xff;
11789 break;
11790
11791 case 16:
11792 immlo = (~immlo) & 0xffff;
11793 break;
11794
11795 case 64:
11796 immhi = (~immhi) & 0xffffffff;
11797 /* fall through. */
11798
11799 case 32:
11800 immlo = (~immlo) & 0xffffffff;
11801 break;
11802
11803 default:
11804 abort ();
11805 }
11806
11807 if (xlo)
11808 *xlo = immlo;
11809
11810 if (xhi)
11811 *xhi = immhi;
11812 }
11813
11814 static void
11815 do_neon_logic (void)
11816 {
11817 if (inst.operands[2].present && inst.operands[2].isreg)
11818 {
11819 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11820 neon_check_type (3, rs, N_IGNORE_TYPE);
11821 /* U bit and size field were set as part of the bitmask. */
11822 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11823 neon_three_same (neon_quad (rs), 0, -1);
11824 }
11825 else
11826 {
11827 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11828 struct neon_type_el et = neon_check_type (2, rs,
11829 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11830 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11831 unsigned immbits;
11832 int cmode;
11833
11834 if (et.type == NT_invtype)
11835 return;
11836
11837 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11838
11839 immbits = inst.operands[1].imm;
11840 if (et.size == 64)
11841 {
11842 /* .i64 is a pseudo-op, so the immediate must be a repeating
11843 pattern. */
11844 if (immbits != (inst.operands[1].regisimm ?
11845 inst.operands[1].reg : 0))
11846 {
11847 /* Set immbits to an invalid constant. */
11848 immbits = 0xdeadbeef;
11849 }
11850 }
11851
11852 switch (opcode)
11853 {
11854 case N_MNEM_vbic:
11855 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11856 break;
11857
11858 case N_MNEM_vorr:
11859 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11860 break;
11861
11862 case N_MNEM_vand:
11863 /* Pseudo-instruction for VBIC. */
11864 neon_invert_size (&immbits, 0, et.size);
11865 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11866 break;
11867
11868 case N_MNEM_vorn:
11869 /* Pseudo-instruction for VORR. */
11870 neon_invert_size (&immbits, 0, et.size);
11871 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11872 break;
11873
11874 default:
11875 abort ();
11876 }
11877
11878 if (cmode == FAIL)
11879 return;
11880
11881 inst.instruction |= neon_quad (rs) << 6;
11882 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11883 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11884 inst.instruction |= cmode << 8;
11885 neon_write_immbits (immbits);
11886
11887 inst.instruction = neon_dp_fixup (inst.instruction);
11888 }
11889 }
11890
11891 static void
11892 do_neon_bitfield (void)
11893 {
11894 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11895 neon_check_type (3, rs, N_IGNORE_TYPE);
11896 neon_three_same (neon_quad (rs), 0, -1);
11897 }
11898
11899 static void
11900 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11901 unsigned destbits)
11902 {
11903 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11904 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11905 types | N_KEY);
11906 if (et.type == NT_float)
11907 {
11908 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11909 neon_three_same (neon_quad (rs), 0, -1);
11910 }
11911 else
11912 {
11913 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11914 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11915 }
11916 }
11917
11918 static void
11919 do_neon_dyadic_if_su (void)
11920 {
11921 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11922 }
11923
11924 static void
11925 do_neon_dyadic_if_su_d (void)
11926 {
11927 /* This version only allows D registers, but that constraint is enforced during
11928 operand parsing so we don't need to do anything extra here. */
11929 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11930 }
11931
11932 static void
11933 do_neon_dyadic_if_i_d (void)
11934 {
11935 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11936 affected if we specify unsigned args. */
11937 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11938 }
11939
11940 enum vfp_or_neon_is_neon_bits
11941 {
11942 NEON_CHECK_CC = 1,
11943 NEON_CHECK_ARCH = 2
11944 };
11945
11946 /* Call this function for an instruction which may have belonged to the VFP or
11947 Neon instruction sets, but turned out to be a Neon instruction (due to the
11948 operand types involved, etc.). We have to check and/or fix-up a couple of
11949 things:
11950
11951 - Make sure the user hasn't attempted to make a Neon instruction
11952 conditional.
11953 - Alter the value in the condition code field if necessary.
11954 - Make sure that the arch supports Neon instructions.
11955
11956 Which of these operations take place depends on bits from enum
11957 vfp_or_neon_is_neon_bits.
11958
11959 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11960 current instruction's condition is COND_ALWAYS, the condition field is
11961 changed to inst.uncond_value. This is necessary because instructions shared
11962 between VFP and Neon may be conditional for the VFP variants only, and the
11963 unconditional Neon version must have, e.g., 0xF in the condition field. */
11964
11965 static int
11966 vfp_or_neon_is_neon (unsigned check)
11967 {
11968 /* Conditions are always legal in Thumb mode (IT blocks). */
11969 if (!thumb_mode && (check & NEON_CHECK_CC))
11970 {
11971 if (inst.cond != COND_ALWAYS)
11972 {
11973 first_error (_(BAD_COND));
11974 return FAIL;
11975 }
11976 if (inst.uncond_value != -1)
11977 inst.instruction |= inst.uncond_value << 28;
11978 }
11979
11980 if ((check & NEON_CHECK_ARCH)
11981 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11982 {
11983 first_error (_(BAD_FPU));
11984 return FAIL;
11985 }
11986
11987 return SUCCESS;
11988 }
11989
11990 static void
11991 do_neon_addsub_if_i (void)
11992 {
11993 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11994 return;
11995
11996 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11997 return;
11998
11999 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12000 affected if we specify unsigned args. */
12001 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12002 }
12003
12004 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12005 result to be:
12006 V<op> A,B (A is operand 0, B is operand 2)
12007 to mean:
12008 V<op> A,B,A
12009 not:
12010 V<op> A,B,B
12011 so handle that case specially. */
12012
12013 static void
12014 neon_exchange_operands (void)
12015 {
12016 void *scratch = alloca (sizeof (inst.operands[0]));
12017 if (inst.operands[1].present)
12018 {
12019 /* Swap operands[1] and operands[2]. */
12020 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12021 inst.operands[1] = inst.operands[2];
12022 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12023 }
12024 else
12025 {
12026 inst.operands[1] = inst.operands[2];
12027 inst.operands[2] = inst.operands[0];
12028 }
12029 }
12030
12031 static void
12032 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12033 {
12034 if (inst.operands[2].isreg)
12035 {
12036 if (invert)
12037 neon_exchange_operands ();
12038 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12039 }
12040 else
12041 {
12042 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12043 struct neon_type_el et = neon_check_type (2, rs,
12044 N_EQK | N_SIZ, immtypes | N_KEY);
12045
12046 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12047 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12048 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12049 inst.instruction |= LOW4 (inst.operands[1].reg);
12050 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12051 inst.instruction |= neon_quad (rs) << 6;
12052 inst.instruction |= (et.type == NT_float) << 10;
12053 inst.instruction |= neon_logbits (et.size) << 18;
12054
12055 inst.instruction = neon_dp_fixup (inst.instruction);
12056 }
12057 }
12058
12059 static void
12060 do_neon_cmp (void)
12061 {
12062 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12063 }
12064
12065 static void
12066 do_neon_cmp_inv (void)
12067 {
12068 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12069 }
12070
12071 static void
12072 do_neon_ceq (void)
12073 {
12074 neon_compare (N_IF_32, N_IF_32, FALSE);
12075 }
12076
12077 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12078 scalars, which are encoded in 5 bits, M : Rm.
12079 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12080 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12081 index in M. */
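/* For example, a 16-bit scalar d5[1] gives regno = 5 and elno = 1, so the
function below returns 5 | (1 << 3) = 0x0d; neon_mul_mac then places the
low four bits in Rm and the remaining bit in M. */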
12082
12083 static unsigned
12084 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12085 {
12086 unsigned regno = NEON_SCALAR_REG (scalar);
12087 unsigned elno = NEON_SCALAR_INDEX (scalar);
12088
12089 switch (elsize)
12090 {
12091 case 16:
12092 if (regno > 7 || elno > 3)
12093 goto bad_scalar;
12094 return regno | (elno << 3);
12095
12096 case 32:
12097 if (regno > 15 || elno > 1)
12098 goto bad_scalar;
12099 return regno | (elno << 4);
12100
12101 default:
12102 bad_scalar:
12103 first_error (_("scalar out of range for multiply instruction"));
12104 }
12105
12106 return 0;
12107 }
12108
12109 /* Encode multiply / multiply-accumulate scalar instructions. */
12110
12111 static void
12112 neon_mul_mac (struct neon_type_el et, int ubit)
12113 {
12114 unsigned scalar;
12115
12116 /* Give a more helpful error message if we have an invalid type. */
12117 if (et.type == NT_invtype)
12118 return;
12119
12120 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12121 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12122 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12123 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12124 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12125 inst.instruction |= LOW4 (scalar);
12126 inst.instruction |= HI1 (scalar) << 5;
12127 inst.instruction |= (et.type == NT_float) << 8;
12128 inst.instruction |= neon_logbits (et.size) << 20;
12129 inst.instruction |= (ubit != 0) << 24;
12130
12131 inst.instruction = neon_dp_fixup (inst.instruction);
12132 }
12133
12134 static void
12135 do_neon_mac_maybe_scalar (void)
12136 {
12137 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12138 return;
12139
12140 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12141 return;
12142
12143 if (inst.operands[2].isscalar)
12144 {
12145 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12146 struct neon_type_el et = neon_check_type (3, rs,
12147 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12148 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12149 neon_mul_mac (et, neon_quad (rs));
12150 }
12151 else
12152 {
12153 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12154 affected if we specify unsigned args. */
12155 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12156 }
12157 }
12158
12159 static void
12160 do_neon_tst (void)
12161 {
12162 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12163 struct neon_type_el et = neon_check_type (3, rs,
12164 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12165 neon_three_same (neon_quad (rs), 0, et.size);
12166 }
12167
12168 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12169 same types as the MAC equivalents. The polynomial type for this instruction
12170 is encoded the same as the integer type. */
12171
12172 static void
12173 do_neon_mul (void)
12174 {
12175 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12176 return;
12177
12178 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12179 return;
12180
12181 if (inst.operands[2].isscalar)
12182 do_neon_mac_maybe_scalar ();
12183 else
12184 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12185 }
12186
12187 static void
12188 do_neon_qdmulh (void)
12189 {
12190 if (inst.operands[2].isscalar)
12191 {
12192 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12193 struct neon_type_el et = neon_check_type (3, rs,
12194 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12195 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12196 neon_mul_mac (et, neon_quad (rs));
12197 }
12198 else
12199 {
12200 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12201 struct neon_type_el et = neon_check_type (3, rs,
12202 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12203 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12204 /* The U bit (rounding) comes from the bit mask. */
12205 neon_three_same (neon_quad (rs), 0, et.size);
12206 }
12207 }
12208
12209 static void
12210 do_neon_fcmp_absolute (void)
12211 {
12212 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12213 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12214 /* The size field comes from the bit mask. */
12215 neon_three_same (neon_quad (rs), 1, -1);
12216 }
12217
12218 static void
12219 do_neon_fcmp_absolute_inv (void)
12220 {
12221 neon_exchange_operands ();
12222 do_neon_fcmp_absolute ();
12223 }
12224
12225 static void
12226 do_neon_step (void)
12227 {
12228 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12229 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12230 neon_three_same (neon_quad (rs), 0, -1);
12231 }
12232
12233 static void
12234 do_neon_abs_neg (void)
12235 {
12236 enum neon_shape rs;
12237 struct neon_type_el et;
12238
12239 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12240 return;
12241
12242 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12243 return;
12244
12245 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12246 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12247
12248 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12249 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12250 inst.instruction |= LOW4 (inst.operands[1].reg);
12251 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12252 inst.instruction |= neon_quad (rs) << 6;
12253 inst.instruction |= (et.type == NT_float) << 10;
12254 inst.instruction |= neon_logbits (et.size) << 18;
12255
12256 inst.instruction = neon_dp_fixup (inst.instruction);
12257 }
12258
12259 static void
12260 do_neon_sli (void)
12261 {
12262 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12263 struct neon_type_el et = neon_check_type (2, rs,
12264 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12265 int imm = inst.operands[2].imm;
12266 constraint (imm < 0 || (unsigned)imm >= et.size,
12267 _("immediate out of range for insert"));
12268 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12269 }
12270
12271 static void
12272 do_neon_sri (void)
12273 {
12274 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12275 struct neon_type_el et = neon_check_type (2, rs,
12276 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12277 int imm = inst.operands[2].imm;
12278 constraint (imm < 1 || (unsigned)imm > et.size,
12279 _("immediate out of range for insert"));
12280 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12281 }
12282
12283 static void
12284 do_neon_qshlu_imm (void)
12285 {
12286 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12287 struct neon_type_el et = neon_check_type (2, rs,
12288 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12289 int imm = inst.operands[2].imm;
12290 constraint (imm < 0 || (unsigned)imm >= et.size,
12291 _("immediate out of range for shift"));
12292 /* Only encodes the 'U present' variant of the instruction.
12293 In this case, signed types have OP (bit 8) set to 0.
12294 Unsigned types have OP set to 1. */
12295 inst.instruction |= (et.type == NT_unsigned) << 8;
12296 /* The rest of the bits are the same as other immediate shifts. */
12297 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12298 }
12299
12300 static void
12301 do_neon_qmovn (void)
12302 {
12303 struct neon_type_el et = neon_check_type (2, NS_DQ,
12304 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12305 /* Saturating move where operands can be signed or unsigned, and the
12306 destination has the same signedness. */
12307 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12308 if (et.type == NT_unsigned)
12309 inst.instruction |= 0xc0;
12310 else
12311 inst.instruction |= 0x80;
12312 neon_two_same (0, 1, et.size / 2);
12313 }
12314
12315 static void
12316 do_neon_qmovun (void)
12317 {
12318 struct neon_type_el et = neon_check_type (2, NS_DQ,
12319 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12320 /* Saturating move with unsigned results. Operands must be signed. */
12321 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12322 neon_two_same (0, 1, et.size / 2);
12323 }
12324
12325 static void
12326 do_neon_rshift_sat_narrow (void)
12327 {
12328 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12329 or unsigned. If operands are unsigned, results must also be unsigned. */
12330 struct neon_type_el et = neon_check_type (2, NS_DQI,
12331 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12332 int imm = inst.operands[2].imm;
12333 /* This gets the bounds check, size encoding and immediate bits calculation
12334 right. */
12335 et.size /= 2;
12336
12337 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12338 VQMOVN.I<size> <Dd>, <Qm>. */
12339 if (imm == 0)
12340 {
12341 inst.operands[2].present = 0;
12342 inst.instruction = N_MNEM_vqmovn;
12343 do_neon_qmovn ();
12344 return;
12345 }
12346
12347 constraint (imm < 1 || (unsigned)imm > et.size,
12348 _("immediate out of range"));
12349 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12350 }
12351
12352 static void
12353 do_neon_rshift_sat_narrow_u (void)
12354 {
12355 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12356 or unsigned. If operands are unsigned, results must also be unsigned. */
12357 struct neon_type_el et = neon_check_type (2, NS_DQI,
12358 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12359 int imm = inst.operands[2].imm;
12360 /* This gets the bounds check, size encoding and immediate bits calculation
12361 right. */
12362 et.size /= 2;
12363
12364 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12365 VQMOVUN.I<size> <Dd>, <Qm>. */
12366 if (imm == 0)
12367 {
12368 inst.operands[2].present = 0;
12369 inst.instruction = N_MNEM_vqmovun;
12370 do_neon_qmovun ();
12371 return;
12372 }
12373
12374 constraint (imm < 1 || (unsigned)imm > et.size,
12375 _("immediate out of range"));
12376 /* FIXME: The manual is kind of unclear about what value U should have in
12377 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12378 must be 1. */
12379 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12380 }
12381
12382 static void
12383 do_neon_movn (void)
12384 {
12385 struct neon_type_el et = neon_check_type (2, NS_DQ,
12386 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12387 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12388 neon_two_same (0, 1, et.size / 2);
12389 }
12390
12391 static void
12392 do_neon_rshift_narrow (void)
12393 {
12394 struct neon_type_el et = neon_check_type (2, NS_DQI,
12395 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12396 int imm = inst.operands[2].imm;
12397 /* This gets the bounds check, size encoding and immediate bits calculation
12398 right. */
12399 et.size /= 2;
12400
12401 /* If the immediate is zero then this is a pseudo-instruction for
12402 VMOVN.I<size> <Dd>, <Qm>. */
12403 if (imm == 0)
12404 {
12405 inst.operands[2].present = 0;
12406 inst.instruction = N_MNEM_vmovn;
12407 do_neon_movn ();
12408 return;
12409 }
12410
12411 constraint (imm < 1 || (unsigned)imm > et.size,
12412 _("immediate out of range for narrowing operation"));
12413 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12414 }
12415
12416 static void
12417 do_neon_shll (void)
12418 {
12419 /* FIXME: Type checking when lengthening. */
12420 struct neon_type_el et = neon_check_type (2, NS_QDI,
12421 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12422 unsigned imm = inst.operands[2].imm;
12423
12424 if (imm == et.size)
12425 {
12426 /* Maximum shift variant. */
12427 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12428 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12429 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12430 inst.instruction |= LOW4 (inst.operands[1].reg);
12431 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12432 inst.instruction |= neon_logbits (et.size) << 18;
12433
12434 inst.instruction = neon_dp_fixup (inst.instruction);
12435 }
12436 else
12437 {
12438 /* A more-specific type check for non-max versions. */
12439 et = neon_check_type (2, NS_QDI,
12440 N_EQK | N_DBL, N_SU_32 | N_KEY);
12441 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12442 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12443 }
12444 }
12445
12446 /* Check the various types for the VCVT instruction, and return which version
12447 the current instruction is. */
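/* Flavours 0-3 are the integer<->f32 conversions, 4-9 the plain VFP
conversions, 10-17 the VFP conversions with bitshift; -1 means that no
valid type combination matched. */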
12448
12449 static int
12450 neon_cvt_flavour (enum neon_shape rs)
12451 {
12452 #define CVT_VAR(C,X,Y) \
12453 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12454 if (et.type != NT_invtype) \
12455 { \
12456 inst.error = NULL; \
12457 return (C); \
12458 }
12459 struct neon_type_el et;
12460 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12461 || rs == NS_FF) ? N_VFP : 0;
12462 /* The instruction versions which take an immediate take one register
12463 argument, which is extended to the width of the full register. Thus the
12464 "source" and "destination" registers must have the same width. Hack that
12465 here by making the size equal to the key (wider, in this case) operand. */
12466 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12467
12468 CVT_VAR (0, N_S32, N_F32);
12469 CVT_VAR (1, N_U32, N_F32);
12470 CVT_VAR (2, N_F32, N_S32);
12471 CVT_VAR (3, N_F32, N_U32);
12472
12473 whole_reg = N_VFP;
12474
12475 /* VFP instructions. */
12476 CVT_VAR (4, N_F32, N_F64);
12477 CVT_VAR (5, N_F64, N_F32);
12478 CVT_VAR (6, N_S32, N_F64 | key);
12479 CVT_VAR (7, N_U32, N_F64 | key);
12480 CVT_VAR (8, N_F64 | key, N_S32);
12481 CVT_VAR (9, N_F64 | key, N_U32);
12482 /* VFP instructions with bitshift. */
12483 CVT_VAR (10, N_F32 | key, N_S16);
12484 CVT_VAR (11, N_F32 | key, N_U16);
12485 CVT_VAR (12, N_F64 | key, N_S16);
12486 CVT_VAR (13, N_F64 | key, N_U16);
12487 CVT_VAR (14, N_S16, N_F32 | key);
12488 CVT_VAR (15, N_U16, N_F32 | key);
12489 CVT_VAR (16, N_S16, N_F64 | key);
12490 CVT_VAR (17, N_U16, N_F64 | key);
12491
12492 return -1;
12493 #undef CVT_VAR
12494 }
12495
12496 /* Neon-syntax VFP conversions. */
12497
12498 static void
12499 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12500 {
12501 const char *opname = 0;
12502
12503 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12504 {
12505 /* Conversions with immediate bitshift. */
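/* Indexed by the flavour returned by neon_cvt_flavour. Entries 4 and 5
are NULL because the f32<->f64 conversions never take a bitshift. */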
12506 const char *enc[] =
12507 {
12508 "ftosls",
12509 "ftouls",
12510 "fsltos",
12511 "fultos",
12512 NULL,
12513 NULL,
12514 "ftosld",
12515 "ftould",
12516 "fsltod",
12517 "fultod",
12518 "fshtos",
12519 "fuhtos",
12520 "fshtod",
12521 "fuhtod",
12522 "ftoshs",
12523 "ftouhs",
12524 "ftoshd",
12525 "ftouhd"
12526 };
12527
12528 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12529 {
12530 opname = enc[flavour];
12531 constraint (inst.operands[0].reg != inst.operands[1].reg,
12532 _("operands 0 and 1 must be the same register"));
12533 inst.operands[1] = inst.operands[2];
12534 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12535 }
12536 }
12537 else
12538 {
12539 /* Conversions without bitshift. */
12540 const char *enc[] =
12541 {
12542 "ftosis",
12543 "ftouis",
12544 "fsitos",
12545 "fuitos",
12546 "fcvtsd",
12547 "fcvtds",
12548 "ftosid",
12549 "ftouid",
12550 "fsitod",
12551 "fuitod"
12552 };
12553
12554 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12555 opname = enc[flavour];
12556 }
12557
12558 if (opname)
12559 do_vfp_nsyn_opcode (opname);
12560 }
12561
12562 static void
12563 do_vfp_nsyn_cvtz (void)
12564 {
12565 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12566 int flavour = neon_cvt_flavour (rs);
12567 const char *enc[] =
12568 {
12569 "ftosizs",
12570 "ftouizs",
12571 NULL,
12572 NULL,
12573 NULL,
12574 NULL,
12575 "ftosizd",
12576 "ftouizd"
12577 };
12578
12579 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12580 do_vfp_nsyn_opcode (enc[flavour]);
12581 }
12582
12583 static void
12584 do_neon_cvt (void)
12585 {
12586 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12587 NS_FD, NS_DF, NS_FF, NS_NULL);
12588 int flavour = neon_cvt_flavour (rs);
12589
12590 /* VFP rather than Neon conversions. */
12591 if (flavour >= 4)
12592 {
12593 do_vfp_nsyn_cvt (rs, flavour);
12594 return;
12595 }
12596
12597 switch (rs)
12598 {
12599 case NS_DDI:
12600 case NS_QQI:
12601 {
12602 unsigned immbits;
12603 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12604 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12605 return;
12606 /* Fixed-point conversion with #0 immediate is encoded as an
12607 integer conversion. */
12608 if (inst.operands[2].present && inst.operands[2].imm == 0)
12609 goto int_encode;
12610 immbits = 32 - inst.operands[2].imm;
12611 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12612 if (flavour != -1)
12613 inst.instruction |= enctab[flavour];
12614 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12615 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12616 inst.instruction |= LOW4 (inst.operands[1].reg);
12617 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12618 inst.instruction |= neon_quad (rs) << 6;
12619 inst.instruction |= 1 << 21;
12620 inst.instruction |= immbits << 16;
12621
12622 inst.instruction = neon_dp_fixup (inst.instruction);
12623 }
12624 break;
12625
12626 case NS_DD:
12627 case NS_QQ:
12628 int_encode:
12629 {
12630 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12631
12632 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12633
12634 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12635 return;
12636
12637 if (flavour != -1)
12638 inst.instruction |= enctab[flavour];
12639
12640 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12641 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12642 inst.instruction |= LOW4 (inst.operands[1].reg);
12643 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12644 inst.instruction |= neon_quad (rs) << 6;
12645 inst.instruction |= 2 << 18;
12646
12647 inst.instruction = neon_dp_fixup (inst.instruction);
12648 }
12649 break;
12650
12651 default:
12652 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12653 do_vfp_nsyn_cvt (rs, flavour);
12654 }
12655 }
12656
12657 static void
12658 neon_move_immediate (void)
12659 {
12660 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12661 struct neon_type_el et = neon_check_type (2, rs,
12662 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12663 unsigned immlo, immhi = 0, immbits;
12664 int op, cmode, float_p;
12665
12666 constraint (et.type == NT_invtype,
12667 _("operand size must be specified for immediate VMOV"));
12668
12669 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12670 op = (inst.instruction & (1 << 5)) != 0;
12671
12672 immlo = inst.operands[1].imm;
12673 if (inst.operands[1].regisimm)
12674 immhi = inst.operands[1].reg;
12675
12676 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12677 _("immediate has bits set outside the operand size"));
12678
12679 float_p = inst.operands[1].immisfloat;
12680
12681 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
12682 et.size, et.type)) == FAIL)
12683 {
12684 /* Invert relevant bits only. */
12685 neon_invert_size (&immlo, &immhi, et.size);
12686 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12687 with one or the other; those cases are caught by
12688 neon_cmode_for_move_imm. */
12689 op = !op;
12690 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
12691 &op, et.size, et.type)) == FAIL)
12692 {
12693 first_error (_("immediate out of range"));
12694 return;
12695 }
12696 }
12697
12698 inst.instruction &= ~(1 << 5);
12699 inst.instruction |= op << 5;
12700
12701 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12702 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12703 inst.instruction |= neon_quad (rs) << 6;
12704 inst.instruction |= cmode << 8;
12705
12706 neon_write_immbits (immbits);
12707 }
12708
12709 static void
12710 do_neon_mvn (void)
12711 {
12712 if (inst.operands[1].isreg)
12713 {
12714 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12715
12716 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12717 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12718 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12719 inst.instruction |= LOW4 (inst.operands[1].reg);
12720 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12721 inst.instruction |= neon_quad (rs) << 6;
12722 }
12723 else
12724 {
12725 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12726 neon_move_immediate ();
12727 }
12728
12729 inst.instruction = neon_dp_fixup (inst.instruction);
12730 }
12731
12732 /* Encode instructions of form:
12733
12734 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12735 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12736
12737 */
12738
12739 static void
12740 neon_mixed_length (struct neon_type_el et, unsigned size)
12741 {
12742 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12743 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12744 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12745 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12746 inst.instruction |= LOW4 (inst.operands[2].reg);
12747 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12748 inst.instruction |= (et.type == NT_unsigned) << 24;
12749 inst.instruction |= neon_logbits (size) << 20;
12750
12751 inst.instruction = neon_dp_fixup (inst.instruction);
12752 }
12753
12754 static void
12755 do_neon_dyadic_long (void)
12756 {
12757 /* FIXME: Type checking for lengthening op. */
12758 struct neon_type_el et = neon_check_type (3, NS_QDD,
12759 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12760 neon_mixed_length (et, et.size);
12761 }
12762
12763 static void
12764 do_neon_abal (void)
12765 {
12766 struct neon_type_el et = neon_check_type (3, NS_QDD,
12767 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12768 neon_mixed_length (et, et.size);
12769 }
12770
12771 static void
12772 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12773 {
12774 if (inst.operands[2].isscalar)
12775 {
12776 struct neon_type_el et = neon_check_type (3, NS_QDS,
12777 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12778 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12779 neon_mul_mac (et, et.type == NT_unsigned);
12780 }
12781 else
12782 {
12783 struct neon_type_el et = neon_check_type (3, NS_QDD,
12784 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12785 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12786 neon_mixed_length (et, et.size);
12787 }
12788 }
12789
12790 static void
12791 do_neon_mac_maybe_scalar_long (void)
12792 {
12793 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12794 }
12795
12796 static void
12797 do_neon_dyadic_wide (void)
12798 {
12799 struct neon_type_el et = neon_check_type (3, NS_QQD,
12800 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12801 neon_mixed_length (et, et.size);
12802 }
12803
12804 static void
12805 do_neon_dyadic_narrow (void)
12806 {
12807 struct neon_type_el et = neon_check_type (3, NS_QDD,
12808 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12809 /* Operand sign is unimportant, and the U bit is part of the opcode,
12810 so force the operand type to integer. */
12811 et.type = NT_integer;
12812 neon_mixed_length (et, et.size / 2);
12813 }
12814
12815 static void
12816 do_neon_mul_sat_scalar_long (void)
12817 {
12818 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12819 }
12820
12821 static void
12822 do_neon_vmull (void)
12823 {
12824 if (inst.operands[2].isscalar)
12825 do_neon_mac_maybe_scalar_long ();
12826 else
12827 {
12828 struct neon_type_el et = neon_check_type (3, NS_QDD,
12829 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12830 if (et.type == NT_poly)
12831 inst.instruction = NEON_ENC_POLY (inst.instruction);
12832 else
12833 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12834 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12835 zero. Should be OK as-is. */
12836 neon_mixed_length (et, et.size);
12837 }
12838 }
12839
12840 static void
12841 do_neon_ext (void)
12842 {
12843 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12844 struct neon_type_el et = neon_check_type (3, rs,
12845 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
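/* VEXT's immediate is a byte offset, so scale the element index by the
element size in bytes; the result must lie within the width of a source
register. */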
12846 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12847 constraint (imm >= (neon_quad (rs) ? 16 : 8), _("shift out of range"));
12848 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12849 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12850 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12851 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12852 inst.instruction |= LOW4 (inst.operands[2].reg);
12853 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12854 inst.instruction |= neon_quad (rs) << 6;
12855 inst.instruction |= imm << 8;
12856
12857 inst.instruction = neon_dp_fixup (inst.instruction);
12858 }
12859
12860 static void
12861 do_neon_rev (void)
12862 {
12863 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12864 struct neon_type_el et = neon_check_type (2, rs,
12865 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12866 unsigned op = (inst.instruction >> 7) & 3;
12867 /* N (width of reversed regions) is encoded as part of the bitmask. We
12868 extract it here to check that the elements to be reversed are smaller.
12869 Otherwise we'd get a reserved instruction. */
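/* For instance, VREV64 has op == 0, so ELSIZE is 64 and 8, 16 and 32-bit
elements are all accepted. */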
12870 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12871 assert (elsize != 0);
12872 constraint (et.size >= elsize,
12873 _("elements must be smaller than reversal region"));
12874 neon_two_same (neon_quad (rs), 1, et.size);
12875 }
12876
12877 static void
12878 do_neon_dup (void)
12879 {
12880 if (inst.operands[1].isscalar)
12881 {
12882 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12883 struct neon_type_el et = neon_check_type (2, rs,
12884 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12885 unsigned sizebits = et.size >> 3;
12886 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12887 int logsize = neon_logbits (et.size);
12888 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12889
12890 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12891 return;
12892
12893 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12894 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12895 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12896 inst.instruction |= LOW4 (dm);
12897 inst.instruction |= HI1 (dm) << 5;
12898 inst.instruction |= neon_quad (rs) << 6;
12899 inst.instruction |= x << 17;
12900 inst.instruction |= sizebits << 16;
12901
12902 inst.instruction = neon_dp_fixup (inst.instruction);
12903 }
12904 else
12905 {
12906 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12907 struct neon_type_el et = neon_check_type (2, rs,
12908 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12909 /* Duplicate ARM register to lanes of vector. */
12910 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
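/* The element size is encoded in bits 22 and 5 (the b and e fields):
8-bit elements set bit 22, 16-bit elements set bit 5, and 32-bit
elements set neither. */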
12911 switch (et.size)
12912 {
12913 case 8: inst.instruction |= 0x400000; break;
12914 case 16: inst.instruction |= 0x000020; break;
12915 case 32: inst.instruction |= 0x000000; break;
12916 default: break;
12917 }
12918 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12919 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12920 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12921 inst.instruction |= neon_quad (rs) << 21;
12922 /* The encoding for this instruction is identical for the ARM and Thumb
12923 variants, except for the condition field. */
12924 do_vfp_cond_or_thumb ();
12925 }
12926 }
12927
12928 /* VMOV has particularly many variations. It can be one of:
12929 0. VMOV<c><q> <Qd>, <Qm>
12930 1. VMOV<c><q> <Dd>, <Dm>
12931 (Register operations, which are VORR with Rm = Rn.)
12932 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12933 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12934 (Immediate loads.)
12935 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12936 (ARM register to scalar.)
12937 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12938 (Two ARM registers to vector.)
12939 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12940 (Scalar to ARM register.)
12941 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12942 (Vector to two ARM registers.)
12943 8. VMOV.F32 <Sd>, <Sm>
12944 9. VMOV.F64 <Dd>, <Dm>
12945 (VFP register moves.)
12946 10. VMOV.F32 <Sd>, #imm
12947 11. VMOV.F64 <Dd>, #imm
12948 (VFP float immediate load.)
12949 12. VMOV <Rd>, <Sm>
12950 (VFP single to ARM reg.)
12951 13. VMOV <Sd>, <Rm>
12952 (ARM reg to VFP single.)
12953 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12954 (Two ARM regs to two VFP singles.)
12955 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12956 (Two VFP singles to two ARM regs.)
12957
12958 These cases can be disambiguated using neon_select_shape, except cases 1/9
12959 and 3/11 which depend on the operand type too.
12960
12961 All the encoded bits are hardcoded by this function.
12962
12963 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12964 Cases 5, 7 may be used with VFPv2 and above.
12965
12966 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12967 can specify a type where it doesn't make sense to, and it is ignored).
12968 */
12969
12970 static void
12971 do_neon_mov (void)
12972 {
12973 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12974 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12975 NS_NULL);
12976 struct neon_type_el et;
12977 const char *ldconst = 0;
12978
12979 switch (rs)
12980 {
12981 case NS_DD: /* case 1/9. */
12982 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12983 /* It is not an error here if no type is given. */
12984 inst.error = NULL;
12985 if (et.type == NT_float && et.size == 64)
12986 {
12987 do_vfp_nsyn_opcode ("fcpyd");
12988 break;
12989 }
12990 /* fall through. */
12991
12992 case NS_QQ: /* case 0/1. */
12993 {
12994 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12995 return;
12996 /* The architecture manual I have doesn't explicitly state which
12997 value the U bit should have for register->register moves, but
12998 the equivalent VORR instruction has U = 0, so do that. */
12999 inst.instruction = 0x0200110;
13000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13002 inst.instruction |= LOW4 (inst.operands[1].reg);
13003 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13004 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13005 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13006 inst.instruction |= neon_quad (rs) << 6;
13007
13008 inst.instruction = neon_dp_fixup (inst.instruction);
13009 }
13010 break;
13011
13012 case NS_DI: /* case 3/11. */
13013 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13014 inst.error = NULL;
13015 if (et.type == NT_float && et.size == 64)
13016 {
13017 /* case 11 (fconstd). */
13018 ldconst = "fconstd";
13019 goto encode_fconstd;
13020 }
13021 /* fall through. */
13022
13023 case NS_QI: /* case 2/3. */
13024 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13025 return;
13026 inst.instruction = 0x0800010;
13027 neon_move_immediate ();
13028 inst.instruction = neon_dp_fixup (inst.instruction);
13029 break;
13030
13031 case NS_SR: /* case 4. */
13032 {
13033 unsigned bcdebits = 0;
13034 struct neon_type_el et = neon_check_type (2, NS_NULL,
13035 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13036 int logsize = neon_logbits (et.size);
13037 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13038 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13039
13040 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13041 _(BAD_FPU));
13042 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13043 && et.size != 32, _(BAD_FPU));
13044 constraint (et.type == NT_invtype, _("bad type for scalar"));
13045 constraint (x >= 64 / et.size, _("scalar index out of range"));
13046
13047 switch (et.size)
13048 {
13049 case 8: bcdebits = 0x8; break;
13050 case 16: bcdebits = 0x1; break;
13051 case 32: bcdebits = 0x0; break;
13052 default: ;
13053 }
13054
13055 bcdebits |= x << logsize;
13056
13057 inst.instruction = 0xe000b10;
13058 do_vfp_cond_or_thumb ();
13059 inst.instruction |= LOW4 (dn) << 16;
13060 inst.instruction |= HI1 (dn) << 7;
13061 inst.instruction |= inst.operands[1].reg << 12;
13062 inst.instruction |= (bcdebits & 3) << 5;
13063 inst.instruction |= (bcdebits >> 2) << 21;
13064 }
13065 break;
13066
13067 case NS_DRR: /* case 5 (fmdrr). */
13068 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13069 _(BAD_FPU));
13070
13071 inst.instruction = 0xc400b10;
13072 do_vfp_cond_or_thumb ();
13073 inst.instruction |= LOW4 (inst.operands[0].reg);
13074 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13075 inst.instruction |= inst.operands[1].reg << 12;
13076 inst.instruction |= inst.operands[2].reg << 16;
13077 break;
13078
13079 case NS_RS: /* case 6. */
13080 {
13081 struct neon_type_el et = neon_check_type (2, NS_NULL,
13082 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13083 unsigned logsize = neon_logbits (et.size);
13084 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13085 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13086 unsigned abcdebits = 0;
13087
13088 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13089 _(BAD_FPU));
13090 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13091 && et.size != 32, _(BAD_FPU));
13092 constraint (et.type == NT_invtype, _("bad type for scalar"));
13093 constraint (x >= 64 / et.size, _("scalar index out of range"));
13094
13095 switch (et.size)
13096 {
13097 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13098 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13099 case 32: abcdebits = 0x00; break;
13100 default: ;
13101 }
13102
13103 abcdebits |= x << logsize;
13104 inst.instruction = 0xe100b10;
13105 do_vfp_cond_or_thumb ();
13106 inst.instruction |= LOW4 (dn) << 16;
13107 inst.instruction |= HI1 (dn) << 7;
13108 inst.instruction |= inst.operands[0].reg << 12;
13109 inst.instruction |= (abcdebits & 3) << 5;
13110 inst.instruction |= (abcdebits >> 2) << 21;
13111 }
13112 break;
13113
13114 case NS_RRD: /* case 7 (fmrrd). */
13115 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13116 _(BAD_FPU));
13117
13118 inst.instruction = 0xc500b10;
13119 do_vfp_cond_or_thumb ();
13120 inst.instruction |= inst.operands[0].reg << 12;
13121 inst.instruction |= inst.operands[1].reg << 16;
13122 inst.instruction |= LOW4 (inst.operands[2].reg);
13123 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13124 break;
13125
13126 case NS_FF: /* case 8 (fcpys). */
13127 do_vfp_nsyn_opcode ("fcpys");
13128 break;
13129
13130 case NS_FI: /* case 10 (fconsts). */
13131 ldconst = "fconsts";
13132 encode_fconstd:
13133 if (is_quarter_float (inst.operands[1].imm))
13134 {
13135 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13136 do_vfp_nsyn_opcode (ldconst);
13137 }
13138 else
13139 first_error (_("immediate out of range"));
13140 break;
13141
13142 case NS_RF: /* case 12 (fmrs). */
13143 do_vfp_nsyn_opcode ("fmrs");
13144 break;
13145
13146 case NS_FR: /* case 13 (fmsr). */
13147 do_vfp_nsyn_opcode ("fmsr");
13148 break;
13149
13150 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13151 (one of which is a list), but we have parsed four. Do some fiddling to
13152 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13153 expect. */
13154 case NS_RRFF: /* case 14 (fmrrs). */
13155 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13156 _("VFP registers must be adjacent"));
13157 inst.operands[2].imm = 2;
13158 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13159 do_vfp_nsyn_opcode ("fmrrs");
13160 break;
13161
13162 case NS_FFRR: /* case 15 (fmsrr). */
13163 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13164 _("VFP registers must be adjacent"));
13165 inst.operands[1] = inst.operands[2];
13166 inst.operands[2] = inst.operands[3];
13167 inst.operands[0].imm = 2;
13168 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13169 do_vfp_nsyn_opcode ("fmsrr");
13170 break;
13171
13172 default:
13173 abort ();
13174 }
13175 }
13176
13177 static void
13178 do_neon_rshift_round_imm (void)
13179 {
13180 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13181 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13182 int imm = inst.operands[2].imm;
13183
13184 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13185 if (imm == 0)
13186 {
13187 inst.operands[2].present = 0;
13188 do_neon_mov ();
13189 return;
13190 }
13191
13192 constraint (imm < 1 || (unsigned)imm > et.size,
13193 _("immediate out of range for shift"));
13194 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13195 et.size - imm);
13196 }
13197
13198 static void
13199 do_neon_movl (void)
13200 {
13201 struct neon_type_el et = neon_check_type (2, NS_QD,
13202 N_EQK | N_DBL, N_SU_32 | N_KEY);
13203 unsigned sizebits = et.size >> 3;
13204 inst.instruction |= sizebits << 19;
13205 neon_two_same (0, et.type == NT_unsigned, -1);
13206 }
13207
13208 static void
13209 do_neon_trn (void)
13210 {
13211 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13212 struct neon_type_el et = neon_check_type (2, rs,
13213 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13214 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13215 neon_two_same (neon_quad (rs), 1, et.size);
13216 }
13217
13218 static void
13219 do_neon_zip_uzp (void)
13220 {
13221 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13222 struct neon_type_el et = neon_check_type (2, rs,
13223 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13224 if (rs == NS_DD && et.size == 32)
13225 {
13226 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13227 inst.instruction = N_MNEM_vtrn;
13228 do_neon_trn ();
13229 return;
13230 }
13231 neon_two_same (neon_quad (rs), 1, et.size);
13232 }
13233
13234 static void
13235 do_neon_sat_abs_neg (void)
13236 {
13237 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13238 struct neon_type_el et = neon_check_type (2, rs,
13239 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13240 neon_two_same (neon_quad (rs), 1, et.size);
13241 }
13242
13243 static void
13244 do_neon_pair_long (void)
13245 {
13246 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13247 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13248 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
13249 inst.instruction |= (et.type == NT_unsigned) << 7;
13250 neon_two_same (neon_quad (rs), 1, et.size);
13251 }
13252
13253 static void
13254 do_neon_recip_est (void)
13255 {
13256 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13257 struct neon_type_el et = neon_check_type (2, rs,
13258 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13259 inst.instruction |= (et.type == NT_float) << 8;
13260 neon_two_same (neon_quad (rs), 1, et.size);
13261 }
13262
13263 static void
13264 do_neon_cls (void)
13265 {
13266 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13267 struct neon_type_el et = neon_check_type (2, rs,
13268 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13269 neon_two_same (neon_quad (rs), 1, et.size);
13270 }
13271
13272 static void
13273 do_neon_clz (void)
13274 {
13275 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13276 struct neon_type_el et = neon_check_type (2, rs,
13277 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13278 neon_two_same (neon_quad (rs), 1, et.size);
13279 }
13280
13281 static void
13282 do_neon_cnt (void)
13283 {
13284 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13285 struct neon_type_el et = neon_check_type (2, rs,
13286 N_EQK | N_INT, N_8 | N_KEY);
13287 neon_two_same (neon_quad (rs), 1, et.size);
13288 }
13289
13290 static void
13291 do_neon_swp (void)
13292 {
13293 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13294 neon_two_same (neon_quad (rs), 1, -1);
13295 }
13296
13297 static void
13298 do_neon_tbl_tbx (void)
13299 {
13300 unsigned listlenbits;
13301 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13302
13303 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13304 {
13305 first_error (_("bad list length for table lookup"));
13306 return;
13307 }
13308
13309 listlenbits = inst.operands[1].imm - 1;
13310 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13311 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13312 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13313 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13314 inst.instruction |= LOW4 (inst.operands[2].reg);
13315 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13316 inst.instruction |= listlenbits << 8;
13317
13318 inst.instruction = neon_dp_fixup (inst.instruction);
13319 }
13320
13321 static void
13322 do_neon_ldm_stm (void)
13323 {
13324 /* P, U and L bits are part of the bitmask. */
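/* Each 64-bit register occupies two words in the transfer list, so the
offset field written below is twice the register count. */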
13325 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13326 unsigned offsetbits = inst.operands[1].imm * 2;
13327
13328 if (inst.operands[1].issingle)
13329 {
13330 do_vfp_nsyn_ldm_stm (is_dbmode);
13331 return;
13332 }
13333
13334 constraint (is_dbmode && !inst.operands[0].writeback,
13335 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13336
13337 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13338 _("register list must contain at least 1 and at most 16 "
13339 "registers"));
13340
13341 inst.instruction |= inst.operands[0].reg << 16;
13342 inst.instruction |= inst.operands[0].writeback << 21;
13343 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13344 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13345
13346 inst.instruction |= offsetbits;
13347
13348 do_vfp_cond_or_thumb ();
13349 }
13350
13351 static void
13352 do_neon_ldr_str (void)
13353 {
13354 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13355
13356 if (inst.operands[0].issingle)
13357 {
13358 if (is_ldr)
13359 do_vfp_nsyn_opcode ("flds");
13360 else
13361 do_vfp_nsyn_opcode ("fsts");
13362 }
13363 else
13364 {
13365 if (is_ldr)
13366 do_vfp_nsyn_opcode ("fldd");
13367 else
13368 do_vfp_nsyn_opcode ("fstd");
13369 }
13370 }
13371
13372 /* "interleave" version also handles non-interleaving register VLD1/VST1
13373 instructions. */
13374
13375 static void
13376 do_neon_ld_st_interleave (void)
13377 {
13378 struct neon_type_el et = neon_check_type (1, NS_NULL,
13379 N_8 | N_16 | N_32 | N_64);
13380 unsigned alignbits = 0;
13381 unsigned idx;
13382 /* The bits in this table go:
13383 0: register stride of one (0) or two (1)
13384 1,2: register list length, minus one (1, 2, 3, 4).
13385 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13386 We use -1 for invalid entries. */
13387 const int typetable[] =
13388 {
13389 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13390 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13391 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13392 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13393 };
13394 int typebits;
13395
13396 if (et.type == NT_invtype)
13397 return;
13398
13399 if (inst.operands[1].immisalign)
13400 switch (inst.operands[1].imm >> 8)
13401 {
13402 case 64: alignbits = 1; break;
13403 case 128:
13404 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13405 goto bad_alignment;
13406 alignbits = 2;
13407 break;
13408 case 256:
13409 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13410 goto bad_alignment;
13411 alignbits = 3;
13412 break;
13413 default:
13414 bad_alignment:
13415 first_error (_("bad alignment"));
13416 return;
13417 }
13418
13419 inst.instruction |= alignbits << 4;
13420 inst.instruction |= neon_logbits (et.size) << 6;
13421
13422 /* Bits [4:6] of the immediate in a list specifier encode register stride
13423 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13424 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13425 up the right value for "type" in a table based on this value and the given
13426 list style, then stick it back. */
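/* For example, a VLD1 with a two-register list and stride one has
immediate bits [6:4] == 0b010 and instruction bits [9:8] == 0b00, giving
IDX == 2 and type bits 0xa from the table above. */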
13427 idx = ((inst.operands[0].imm >> 4) & 7)
13428 | (((inst.instruction >> 8) & 3) << 3);
13429
13430 typebits = typetable[idx];
13431
13432 constraint (typebits == -1, _("bad list type for instruction"));
13433
13434 inst.instruction &= ~0xf00;
13435 inst.instruction |= typebits << 8;
13436 }
13437
13438 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13439 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13440 otherwise. The variable arguments are a list of pairs of legal (size, align)
13441 values, terminated with -1. */
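/* For example, the call
neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1)
accepts 16-bit elements with 16-bit alignment or 32-bit elements with
32-bit alignment, and rejects any other explicit alignment. */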
13442
13443 static int
13444 neon_alignment_bit (int size, int align, int *do_align, ...)
13445 {
13446 va_list ap;
13447 int result = FAIL, thissize, thisalign;
13448
13449 if (!inst.operands[1].immisalign)
13450 {
13451 *do_align = 0;
13452 return SUCCESS;
13453 }
13454
13455 va_start (ap, do_align);
13456
13457 do
13458 {
13459 thissize = va_arg (ap, int);
13460 if (thissize == -1)
13461 break;
13462 thisalign = va_arg (ap, int);
13463
13464 if (size == thissize && align == thisalign)
13465 result = SUCCESS;
13466 }
13467 while (result != SUCCESS);
13468
13469 va_end (ap);
13470
13471 if (result == SUCCESS)
13472 *do_align = 1;
13473 else
13474 first_error (_("unsupported alignment for instruction"));
13475
13476 return result;
13477 }
13478
13479 static void
13480 do_neon_ld_st_lane (void)
13481 {
13482 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13483 int align_good, do_align = 0;
13484 int logsize = neon_logbits (et.size);
13485 int align = inst.operands[1].imm >> 8;
13486 int n = (inst.instruction >> 8) & 3;
13487 int max_el = 64 / et.size;
13488
13489 if (et.type == NT_invtype)
13490 return;
13491
13492 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13493 _("bad list length"));
13494 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13495 _("scalar index out of range"));
13496 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13497 && et.size == 8,
13498 _("stride of 2 unavailable when element size is 8"));
13499
13500 switch (n)
13501 {
13502 case 0: /* VLD1 / VST1. */
13503 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13504 32, 32, -1);
13505 if (align_good == FAIL)
13506 return;
13507 if (do_align)
13508 {
13509 unsigned alignbits = 0;
13510 switch (et.size)
13511 {
13512 case 16: alignbits = 0x1; break;
13513 case 32: alignbits = 0x3; break;
13514 default: ;
13515 }
13516 inst.instruction |= alignbits << 4;
13517 }
13518 break;
13519
13520 case 1: /* VLD2 / VST2. */
13521 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13522 32, 64, -1);
13523 if (align_good == FAIL)
13524 return;
13525 if (do_align)
13526 inst.instruction |= 1 << 4;
13527 break;
13528
13529 case 2: /* VLD3 / VST3. */
13530 constraint (inst.operands[1].immisalign,
13531 _("can't use alignment with this instruction"));
13532 break;
13533
13534 case 3: /* VLD4 / VST4. */
13535 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13536 16, 64, 32, 64, 32, 128, -1);
13537 if (align_good == FAIL)
13538 return;
13539 if (do_align)
13540 {
13541 unsigned alignbits = 0;
13542 switch (et.size)
13543 {
13544 case 8: alignbits = 0x1; break;
13545 case 16: alignbits = 0x1; break;
13546 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13547 default: ;
13548 }
13549 inst.instruction |= alignbits << 4;
13550 }
13551 break;
13552
13553 default: ;
13554 }
13555
13556 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13557 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13558 inst.instruction |= 1 << (4 + logsize);
13559
13560 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13561 inst.instruction |= logsize << 10;
13562 }
13563
13564 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13565
13566 static void
13567 do_neon_ld_dup (void)
13568 {
13569 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13570 int align_good, do_align = 0;
13571
13572 if (et.type == NT_invtype)
13573 return;
13574
13575 switch ((inst.instruction >> 8) & 3)
13576 {
13577 case 0: /* VLD1. */
13578 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13579 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13580 &do_align, 16, 16, 32, 32, -1);
13581 if (align_good == FAIL)
13582 return;
13583 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13584 {
13585 case 1: break;
13586 case 2: inst.instruction |= 1 << 5; break;
13587 default: first_error (_("bad list length")); return;
13588 }
13589 inst.instruction |= neon_logbits (et.size) << 6;
13590 break;
13591
13592 case 1: /* VLD2. */
13593 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13594 &do_align, 8, 16, 16, 32, 32, 64, -1);
13595 if (align_good == FAIL)
13596 return;
13597 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13598 _("bad list length"));
13599 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13600 inst.instruction |= 1 << 5;
13601 inst.instruction |= neon_logbits (et.size) << 6;
13602 break;
13603
13604 case 2: /* VLD3. */
13605 constraint (inst.operands[1].immisalign,
13606 _("can't use alignment with this instruction"));
13607 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13608 _("bad list length"));
13609 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13610 inst.instruction |= 1 << 5;
13611 inst.instruction |= neon_logbits (et.size) << 6;
13612 break;
13613
13614 case 3: /* VLD4. */
13615 {
13616 int align = inst.operands[1].imm >> 8;
13617 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13618 16, 64, 32, 64, 32, 128, -1);
13619 if (align_good == FAIL)
13620 return;
13621 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13622 _("bad list length"));
13623 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13624 inst.instruction |= 1 << 5;
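/* 32-bit elements with 128-bit alignment use the otherwise-unused size
value of 0b11. */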
13625 if (et.size == 32 && align == 128)
13626 inst.instruction |= 0x3 << 6;
13627 else
13628 inst.instruction |= neon_logbits (et.size) << 6;
13629 }
13630 break;
13631
13632 default: ;
13633 }
13634
13635 inst.instruction |= do_align << 4;
13636 }
13637
13638 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13639 apart from bits [11:4]). */
13640
13641 static void
13642 do_neon_ldx_stx (void)
13643 {
13644 switch (NEON_LANE (inst.operands[0].imm))
13645 {
13646 case NEON_INTERLEAVE_LANES:
13647 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13648 do_neon_ld_st_interleave ();
13649 break;
13650
13651 case NEON_ALL_LANES:
13652 inst.instruction = NEON_ENC_DUP (inst.instruction);
13653 do_neon_ld_dup ();
13654 break;
13655
13656 default:
13657 inst.instruction = NEON_ENC_LANE (inst.instruction);
13658 do_neon_ld_st_lane ();
13659 }
13660
13661 /* L bit comes from bit mask. */
13662 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13663 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13664 inst.instruction |= inst.operands[1].reg << 16;
13665
13666 if (inst.operands[1].postind)
13667 {
13668 int postreg = inst.operands[1].imm & 0xf;
13669 constraint (!inst.operands[1].immisreg,
13670 _("post-index must be a register"));
13671 constraint (postreg == 0xd || postreg == 0xf,
13672 _("bad register for post-index"));
13673 inst.instruction |= postreg;
13674 }
13675 else if (inst.operands[1].writeback)
13676 {
13677 inst.instruction |= 0xd;
13678 }
13679 else
13680 inst.instruction |= 0xf;
13681
13682 if (thumb_mode)
13683 inst.instruction |= 0xf9000000;
13684 else
13685 inst.instruction |= 0xf4000000;
13686 }
13687
13688 \f
13689 /* Overall per-instruction processing. */
13690
13691 /* We need to be able to fix up arbitrary expressions in some statements.
13692 This is so that we can handle symbols that are an arbitrary distance from
13693 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13694 which returns part of an address in a form which will be valid for
13695 a data instruction. We do this by pushing the expression into a symbol
13696 in the expr_section, and creating a fix for that. */
13697
13698 static void
13699 fix_new_arm (fragS * frag,
13700 int where,
13701 short int size,
13702 expressionS * exp,
13703 int pc_rel,
13704 int reloc)
13705 {
13706 fixS * new_fix;
13707
13708 switch (exp->X_op)
13709 {
13710 case O_constant:
13711 case O_symbol:
13712 case O_add:
13713 case O_subtract:
13714 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13715 break;
13716
13717 default:
13718 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13719 pc_rel, reloc);
13720 break;
13721 }
13722
13723 /* Mark whether the fix is to a THUMB instruction, or an ARM
13724 instruction. */
13725 new_fix->tc_fix_data = thumb_mode;
13726 }
13727
13728 /* Create a frag for an instruction requiring relaxation. */
13729 static void
13730 output_relax_insn (void)
13731 {
13732 char * to;
13733 symbolS *sym;
13734 int offset;
13735
13736 /* The size of the instruction is unknown, so tie the debug info to the
13737 start of the instruction. */
13738 dwarf2_emit_insn (0);
13739
13740 switch (inst.reloc.exp.X_op)
13741 {
13742 case O_symbol:
13743 sym = inst.reloc.exp.X_add_symbol;
13744 offset = inst.reloc.exp.X_add_number;
13745 break;
13746 case O_constant:
13747 sym = NULL;
13748 offset = inst.reloc.exp.X_add_number;
13749 break;
13750 default:
13751 sym = make_expr_symbol (&inst.reloc.exp);
13752 offset = 0;
13753 break;
13754 }
13755 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13756 inst.relax, sym, offset, NULL/*offset, opcode*/);
13757 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13758 }
13759
13760 /* Write a 32-bit Thumb instruction to BUF. */
13761 static void
13762 put_thumb32_insn (char * buf, unsigned long insn)
13763 {
13764 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13765 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13766 }
13767
13768 static void
13769 output_inst (const char * str)
13770 {
13771 char * to = NULL;
13772
13773 if (inst.error)
13774 {
13775 as_bad ("%s -- `%s'", inst.error, str);
13776 return;
13777 }
13778 if (inst.relax) {
13779 output_relax_insn();
13780 return;
13781 }
13782 if (inst.size == 0)
13783 return;
13784
13785 to = frag_more (inst.size);
13786
13787 if (thumb_mode && (inst.size > THUMB_SIZE))
13788 {
13789 assert (inst.size == (2 * THUMB_SIZE));
13790 put_thumb32_insn (to, inst.instruction);
13791 }
13792 else if (inst.size > INSN_SIZE)
13793 {
13794 assert (inst.size == (2 * INSN_SIZE));
13795 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13796 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13797 }
13798 else
13799 md_number_to_chars (to, inst.instruction, inst.size);
13800
13801 if (inst.reloc.type != BFD_RELOC_UNUSED)
13802 fix_new_arm (frag_now, to - frag_now->fr_literal,
13803 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13804 inst.reloc.type);
13805
13806 dwarf2_emit_insn (inst.size);
13807 }
13808
13809 /* Tag values used in struct asm_opcode's tag field. */
13810 enum opcode_tag
13811 {
13812 OT_unconditional, /* Instruction cannot be conditionalized.
13813 The ARM condition field is still 0xE. */
13814 OT_unconditionalF, /* Instruction cannot be conditionalized
13815 and carries 0xF in its ARM condition field. */
13816 OT_csuffix, /* Instruction takes a conditional suffix. */
13817 OT_csuffixF, /* Some forms of the instruction take a conditional
13818 suffix, others place 0xF where the condition field
13819 would be. */
13820 OT_cinfix3, /* Instruction takes a conditional infix,
13821 beginning at character index 3. (In
13822 unified mode, it becomes a suffix.) */
13823 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13824 tsts, cmps, cmns, and teqs. */
13825 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13826 character index 3, even in unified mode. Used for
13827 legacy instructions where suffix and infix forms
13828 may be ambiguous. */
13829 OT_csuf_or_in3, /* Instruction takes either a conditional
13830 suffix or an infix at character index 3. */
13831 OT_odd_infix_unc, /* This is the unconditional variant of an
13832 instruction that takes a conditional infix
13833 at an unusual position. In unified mode,
13834 this variant will accept a suffix. */
13835 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13836 are the conditional variants of instructions that
13837 take conditional infixes in unusual positions.
13838 The infix appears at character index
13839 (tag - OT_odd_infix_0). These are not accepted
13840 in unified mode. */
13841 };
13842
13843 /* Subroutine of md_assemble, responsible for looking up the primary
13844 opcode from the mnemonic the user wrote. STR points to the
13845 beginning of the mnemonic.
13846
13847 This is not simply a hash table lookup, because of conditional
13848 variants. Most instructions have conditional variants, which are
13849 expressed with a _conditional affix_ to the mnemonic. If we were
13850 to encode each conditional variant as a literal string in the opcode
13851 table, it would have approximately 20,000 entries.
13852
13853 Most mnemonics take this affix as a suffix, and in unified syntax,
13854 'most' is upgraded to 'all'. However, in the divided syntax, some
13855 instructions take the affix as an infix, notably the s-variants of
13856 the arithmetic instructions. Of those instructions, all but six
13857 have the infix appear after the third character of the mnemonic.
13858
13859 Accordingly, the algorithm for looking up primary opcodes given
13860 an identifier is:
13861
13862 1. Look up the identifier in the opcode table.
13863 If we find a match, go to step U.
13864
13865 2. Look up the last two characters of the identifier in the
13866 conditions table. If we find a match, look up the first N-2
13867 characters of the identifier in the opcode table. If we
13868 find a match, go to step CE.
13869
13870 3. Look up the fourth and fifth characters of the identifier in
13871 the conditions table. If we find a match, extract those
13872 characters from the identifier, and look up the remaining
13873 characters in the opcode table. If we find a match, go
13874 to step CM.
13875
13876 4. Fail.
13877
13878 U. Examine the tag field of the opcode structure, in case this is
13879 one of the six instructions with its conditional infix in an
13880 unusual place. If it is, the tag tells us where to find the
13881 infix; look it up in the conditions table and set inst.cond
13882 accordingly. Otherwise, this is an unconditional instruction.
13883 Again set inst.cond accordingly. Return the opcode structure.
13884
13885 CE. Examine the tag field to make sure this is an instruction that
13886 should receive a conditional suffix. If it is not, fail.
13887 Otherwise, set inst.cond from the suffix we already looked up,
13888 and return the opcode structure.
13889
13890 CM. Examine the tag field to make sure this is an instruction that
13891 should receive a conditional infix after the third character.
13892 If it is not, fail. Otherwise, undo the edits to the current
13893 line of input and proceed as for case CE. */
13894
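/* A worked example of the algorithm above (illustrative only), using the
   opcode and condition tables defined later in this file:

     "addeq"   - step 1 fails, since conditional variants are not stored
		 literally; step 2 finds "eq" in the condition table and
		 "add" in the opcode table, so step CE sets inst.cond from
		 "eq" and returns the "add" entry (tag OT_csuffix).

     "ldmeqfd" - steps 1 and 2 fail ("fd" is not a condition); step 3 finds
		 "eq" at character index 3 and, with it removed, "ldmfd" in
		 the opcode table (tag OT_cinfix3), so step CM sets inst.cond
		 from "eq" and returns the "ldmfd" entry.  */
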
13895 static const struct asm_opcode *
13896 opcode_lookup (char **str)
13897 {
13898 char *end, *base;
13899 char *affix;
13900 const struct asm_opcode *opcode;
13901 const struct asm_cond *cond;
13902 char save[2];
13903 bfd_boolean neon_supported;
13904
13905 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13906
13907 /* Scan up to the end of the mnemonic, which must end in white space,
13908 '.' (in unified mode, or for Neon instructions), or end of string. */
13909 for (base = end = *str; *end != '\0'; end++)
13910 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13911 break;
13912
13913 if (end == base)
13914 return 0;
13915
13916 /* Handle a possible width suffix and/or Neon type suffix. */
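      /* For example, with unified syntax "adds.w" leaves base..end covering
	 "adds", sets inst.size_req to 4 and advances *str past the ".w",
	 while something like "vadd.i16" hands its ".i16" part to
	 parse_neon_type.  */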
13917 if (end[0] == '.')
13918 {
13919 int offset = 2;
13920
13921 /* The .w and .n suffixes are only valid if the unified syntax is in
13922 use. */
13923 if (unified_syntax && end[1] == 'w')
13924 inst.size_req = 4;
13925 else if (unified_syntax && end[1] == 'n')
13926 inst.size_req = 2;
13927 else
13928 offset = 0;
13929
13930 inst.vectype.elems = 0;
13931
13932 *str = end + offset;
13933
13934 if (end[offset] == '.')
13935 {
13936 /* See if we have a Neon type suffix (possible in either unified or
13937 non-unified ARM syntax mode). */
13938 if (parse_neon_type (&inst.vectype, str) == FAIL)
13939 return 0;
13940 }
13941 else if (end[offset] != '\0' && end[offset] != ' ')
13942 return 0;
13943 }
13944 else
13945 *str = end;
13946
13947 /* Look for unaffixed or special-case affixed mnemonic. */
13948 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13949 if (opcode)
13950 {
13951 /* step U */
13952 if (opcode->tag < OT_odd_infix_0)
13953 {
13954 inst.cond = COND_ALWAYS;
13955 return opcode;
13956 }
13957
13958 if (unified_syntax)
13959 as_warn (_("conditional infixes are deprecated in unified syntax"));
13960 affix = base + (opcode->tag - OT_odd_infix_0);
13961 cond = hash_find_n (arm_cond_hsh, affix, 2);
13962 assert (cond);
13963
13964 inst.cond = cond->value;
13965 return opcode;
13966 }
13967
13968 /* Cannot have a conditional suffix on a mnemonic of fewer than three
13969 characters (two for the suffix plus at least one of base mnemonic). */
13970 if (end - base < 3)
13971 return 0;
13972
13973 /* Look for suffixed mnemonic. */
13974 affix = end - 2;
13975 cond = hash_find_n (arm_cond_hsh, affix, 2);
13976 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13977 if (opcode && cond)
13978 {
13979 /* step CE */
13980 switch (opcode->tag)
13981 {
13982 case OT_cinfix3_legacy:
13983 /* Ignore conditional suffixes matched on infix only mnemonics. */
13984 break;
13985
13986 case OT_cinfix3:
13987 case OT_cinfix3_deprecated:
13988 case OT_odd_infix_unc:
13989 if (!unified_syntax)
13990 return 0;
13991 /* else fall through */
13992
13993 case OT_csuffix:
13994 case OT_csuffixF:
13995 case OT_csuf_or_in3:
13996 inst.cond = cond->value;
13997 return opcode;
13998
13999 case OT_unconditional:
14000 case OT_unconditionalF:
14001 if (thumb_mode)
14002 {
14003 inst.cond = cond->value;
14004 }
14005 else
14006 {
14007 /* delayed diagnostic */
14008 inst.error = BAD_COND;
14009 inst.cond = COND_ALWAYS;
14010 }
14011 return opcode;
14012
14013 default:
14014 return 0;
14015 }
14016 }
14017
14018 /* Cannot have a usual-position infix on a mnemonic of less than
14019 six characters (five would be a suffix). */
14020 if (end - base < 6)
14021 return 0;
14022
14023 /* Look for infixed mnemonic in the usual position. */
14024 affix = base + 3;
14025 cond = hash_find_n (arm_cond_hsh, affix, 2);
14026 if (!cond)
14027 return 0;
14028
14029 memcpy (save, affix, 2);
14030 memmove (affix, affix + 2, (end - affix) - 2);
14031 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14032 memmove (affix + 2, affix, (end - affix) - 2);
14033 memcpy (affix, save, 2);
14034
14035 if (opcode
14036 && (opcode->tag == OT_cinfix3
14037 || opcode->tag == OT_cinfix3_deprecated
14038 || opcode->tag == OT_csuf_or_in3
14039 || opcode->tag == OT_cinfix3_legacy))
14040 {
14041 /* step CM */
14042 if (unified_syntax
14043 && (opcode->tag == OT_cinfix3
14044 || opcode->tag == OT_cinfix3_deprecated))
14045 as_warn (_("conditional infixes are deprecated in unified syntax"));
14046
14047 inst.cond = cond->value;
14048 return opcode;
14049 }
14050
14051 return 0;
14052 }
14053
14054 void
14055 md_assemble (char *str)
14056 {
14057 char *p = str;
14058 const struct asm_opcode * opcode;
14059
14060 /* Align the previous label if needed. */
14061 if (last_label_seen != NULL)
14062 {
14063 symbol_set_frag (last_label_seen, frag_now);
14064 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14065 S_SET_SEGMENT (last_label_seen, now_seg);
14066 }
14067
14068 memset (&inst, '\0', sizeof (inst));
14069 inst.reloc.type = BFD_RELOC_UNUSED;
14070
14071 opcode = opcode_lookup (&p);
14072 if (!opcode)
14073 {
14074 /* It wasn't an instruction, but it might be a register alias of
14075 the form alias .req reg, or a Neon .dn/.qn directive. */
14076 if (!create_register_alias (str, p)
14077 && !create_neon_reg_alias (str, p))
14078 as_bad (_("bad instruction `%s'"), str);
14079
14080 return;
14081 }
14082
14083 if (opcode->tag == OT_cinfix3_deprecated)
14084 as_warn (_("s suffix on comparison instruction is deprecated"));
14085
14086 /* The value which unconditional instructions should have in place of the
14087 condition field. */
14088 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14089
14090 if (thumb_mode)
14091 {
14092 arm_feature_set variant;
14093
14094 variant = cpu_variant;
14095 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14096 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14097 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14098 /* Check that this instruction is supported for this CPU. */
14099 if (!opcode->tvariant
14100 || (thumb_mode == 1
14101 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14102 {
14103 as_bad (_("selected processor does not support `%s'"), str);
14104 return;
14105 }
14106 if (inst.cond != COND_ALWAYS && !unified_syntax
14107 && opcode->tencode != do_t_branch)
14108 {
14109 as_bad (_("Thumb does not support conditional execution"));
14110 return;
14111 }
14112
14113 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
14114 {
14115 /* Implicitly require narrow instructions on Thumb-1. This avoids
14116 relaxation accidentally introducing Thumb-2 instructions. */
14117 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23)
14118 inst.size_req = 2;
14119 }
14120
14121 /* Check conditional suffixes. */
14122 if (current_it_mask)
14123 {
14124 int cond;
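	  /* Bit 4 of current_it_mask says whether this slot of the IT block
	     takes the base condition (bit set) or its inverse (bit clear);
	     inverting bit 0 of an ARM condition code inverts the condition
	     (EQ/NE, CS/CC, ...), hence the final XOR with 1.  */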
14125 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14126 current_it_mask <<= 1;
14127 current_it_mask &= 0x1f;
14128 /* The BKPT instruction is unconditional even in an IT block. */
14129 if (!inst.error
14130 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14131 {
14132 as_bad (_("incorrect condition in IT block"));
14133 return;
14134 }
14135 }
14136 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14137 {
14138 as_bad (_("Thumb conditional instruction not in IT block"));
14139 return;
14140 }
14141
14142 mapping_state (MAP_THUMB);
14143 inst.instruction = opcode->tvalue;
14144
14145 if (!parse_operands (p, opcode->operands))
14146 opcode->tencode ();
14147
14148 /* Clear current_it_mask at the end of an IT block. */
14149 if (current_it_mask == 0x10)
14150 current_it_mask = 0;
14151
14152 if (!(inst.error || inst.relax))
14153 {
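	  /* 16-bit Thumb encodings lie below 0xe800; the first halfword of a
	     32-bit Thumb-2 encoding lies in the range 0xe800-0xffff, so a
	     value in that range on its own would be ambiguous here.  */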
14154 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14155 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14156 if (inst.size_req && inst.size_req != inst.size)
14157 {
14158 as_bad (_("cannot honor width suffix -- `%s'"), str);
14159 return;
14160 }
14161 }
14162
14163 /* Something has gone badly wrong if we try to relax a fixed size
14164 instruction. */
14165 assert (inst.size_req == 0 || !inst.relax);
14166
14167 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14168 *opcode->tvariant);
14169 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14170 set those bits when Thumb-2 32-bit instructions are seen, i.e.
14171 anything other than bl/blx.
14172 This is overly pessimistic for relaxable instructions. */
14173 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14174 || inst.relax)
14175 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14176 arm_ext_v6t2);
14177 }
14178 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14179 {
14180 /* Check that this instruction is supported for this CPU. */
14181 if (!opcode->avariant ||
14182 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14183 {
14184 as_bad (_("selected processor does not support `%s'"), str);
14185 return;
14186 }
14187 if (inst.size_req)
14188 {
14189 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14190 return;
14191 }
14192
14193 mapping_state (MAP_ARM);
14194 inst.instruction = opcode->avalue;
14195 if (opcode->tag == OT_unconditionalF)
14196 inst.instruction |= 0xF << 28;
14197 else
14198 inst.instruction |= inst.cond << 28;
14199 inst.size = INSN_SIZE;
14200 if (!parse_operands (p, opcode->operands))
14201 opcode->aencode ();
14202 /* Arm mode bx is marked as both v4T and v5 because it's still required
14203 on a hypothetical non-thumb v5 core. */
14204 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14205 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14206 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14207 else
14208 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14209 *opcode->avariant);
14210 }
14211 else
14212 {
14213 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14214 "-- `%s'"), str);
14215 return;
14216 }
14217 output_inst (str);
14218 }
14219
14220 /* Various frobbings of labels and their addresses. */
14221
14222 void
14223 arm_start_line_hook (void)
14224 {
14225 last_label_seen = NULL;
14226 }
14227
14228 void
14229 arm_frob_label (symbolS * sym)
14230 {
14231 last_label_seen = sym;
14232
14233 ARM_SET_THUMB (sym, thumb_mode);
14234
14235 #if defined OBJ_COFF || defined OBJ_ELF
14236 ARM_SET_INTERWORK (sym, support_interwork);
14237 #endif
14238
14239 /* Note - do not allow local symbols (.Lxxx) to be labeled
14240 as Thumb functions. This is because these labels, whilst
14241 they exist inside Thumb code, are not the entry points for
14242 possible ARM->Thumb calls. Also, these labels can be used
14243 as part of a computed goto or switch statement. For example, gcc
14244 can generate code that looks like this:
14245
14246 ldr r2, [pc, .Laaa]
14247 lsl r3, r3, #2
14248 ldr r2, [r3, r2]
14249 mov pc, r2
14250
14251 .Lbbb: .word .Lxxx
14252 .Lccc: .word .Lyyy
14253 ..etc...
14254 .Laaa: .word .Lbbb
14255
14256 The first instruction loads the address of the jump table.
14257 The second instruction converts a table index into a byte offset.
14258 The third instruction gets the jump address out of the table.
14259 The fourth instruction performs the jump.
14260
14261 If the address stored at .Laaa is that of a symbol which has the
14262 Thumb_Func bit set, then the linker will arrange for this address
14263 to have the bottom bit set, which in turn would mean that the
14264 address computation performed by the third instruction would end
14265 up with the bottom bit set. Since the ARM is capable of unaligned
14266 word loads, the instruction would then load the incorrect address
14267 out of the jump table, and chaos would ensue. */
14268 if (label_is_thumb_function_name
14269 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14270 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14271 {
14272 /* When the address of a Thumb function is taken the bottom
14273 bit of that address should be set. This will allow
14274 interworking between Arm and Thumb functions to work
14275 correctly. */
14276
14277 THUMB_SET_FUNC (sym, 1);
14278
14279 label_is_thumb_function_name = FALSE;
14280 }
14281
14282 dwarf2_emit_label (sym);
14283 }
14284
14285 int
14286 arm_data_in_code (void)
14287 {
14288 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14289 {
14290 *input_line_pointer = '/';
14291 input_line_pointer += 5;
14292 *input_line_pointer = 0;
14293 return 1;
14294 }
14295
14296 return 0;
14297 }
14298
14299 char *
14300 arm_canonicalize_symbol_name (char * name)
14301 {
14302 int len;
14303
14304 if (thumb_mode && (len = strlen (name)) > 5
14305 && streq (name + len - 5, "/data"))
14306 *(name + len - 5) = 0;
14307
14308 return name;
14309 }
14310 \f
14311 /* Table of all register names defined by default. The user can
14312 define additional names with .req. Note that all register names
14313 should appear in both upper and lowercase variants. Some registers
14314 also have mixed-case names. */
14315
14316 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14317 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14318 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14319 #define REGSET(p,t) \
14320 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14321 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14322 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14323 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14324 #define REGSETH(p,t) \
14325 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14326 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14327 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14328 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14329 #define REGSET2(p,t) \
14330 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14331 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14332 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14333 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14334
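/* By way of example, REGSET(r, RN) expands (through REGNUM and REGDEF) to the
   sixteen entries { "r0", 0, REG_TYPE_RN, TRUE, 0 } ... { "r15", 15,
   REG_TYPE_RN, TRUE, 0 }; REGNUM2 doubles the register number, so the Neon
   q0-q15 names below map onto even D-register indices.  */
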
14335 static const struct reg_entry reg_names[] =
14336 {
14337 /* ARM integer registers. */
14338 REGSET(r, RN), REGSET(R, RN),
14339
14340 /* ATPCS synonyms. */
14341 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14342 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14343 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14344
14345 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14346 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14347 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14348
14349 /* Well-known aliases. */
14350 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14351 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14352
14353 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14354 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14355
14356 /* Coprocessor numbers. */
14357 REGSET(p, CP), REGSET(P, CP),
14358
14359 /* Coprocessor register numbers. The "cr" variants are for backward
14360 compatibility. */
14361 REGSET(c, CN), REGSET(C, CN),
14362 REGSET(cr, CN), REGSET(CR, CN),
14363
14364 /* FPA registers. */
14365 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14366 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14367
14368 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14369 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14370
14371 /* VFP SP registers. */
14372 REGSET(s,VFS), REGSET(S,VFS),
14373 REGSETH(s,VFS), REGSETH(S,VFS),
14374
14375 /* VFP DP Registers. */
14376 REGSET(d,VFD), REGSET(D,VFD),
14377 /* Extra Neon DP registers. */
14378 REGSETH(d,VFD), REGSETH(D,VFD),
14379
14380 /* Neon QP registers. */
14381 REGSET2(q,NQ), REGSET2(Q,NQ),
14382
14383 /* VFP control registers. */
14384 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14385 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14386
14387 /* Maverick DSP coprocessor registers. */
14388 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14389 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14390
14391 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14392 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14393 REGDEF(dspsc,0,DSPSC),
14394
14395 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14396 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14397 REGDEF(DSPSC,0,DSPSC),
14398
14399 /* iWMMXt data registers - p0, c0-15. */
14400 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14401
14402 /* iWMMXt control registers - p1, c0-3. */
14403 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14404 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14405 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14406 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14407
14408 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14409 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14410 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14411 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14412 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14413
14414 /* XScale accumulator registers. */
14415 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14416 };
14417 #undef REGDEF
14418 #undef REGNUM
14419 #undef REGSET
14420
14421 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14422 within psr_required_here. */
14423 static const struct asm_psr psrs[] =
14424 {
14425 /* Backward compatibility notation. Note that "all" is no longer
14426 truly all possible PSR bits. */
14427 {"all", PSR_c | PSR_f},
14428 {"flg", PSR_f},
14429 {"ctl", PSR_c},
14430
14431 /* Individual flags. */
14432 {"f", PSR_f},
14433 {"c", PSR_c},
14434 {"x", PSR_x},
14435 {"s", PSR_s},
14436 /* Combinations of flags. */
14437 {"fs", PSR_f | PSR_s},
14438 {"fx", PSR_f | PSR_x},
14439 {"fc", PSR_f | PSR_c},
14440 {"sf", PSR_s | PSR_f},
14441 {"sx", PSR_s | PSR_x},
14442 {"sc", PSR_s | PSR_c},
14443 {"xf", PSR_x | PSR_f},
14444 {"xs", PSR_x | PSR_s},
14445 {"xc", PSR_x | PSR_c},
14446 {"cf", PSR_c | PSR_f},
14447 {"cs", PSR_c | PSR_s},
14448 {"cx", PSR_c | PSR_x},
14449 {"fsx", PSR_f | PSR_s | PSR_x},
14450 {"fsc", PSR_f | PSR_s | PSR_c},
14451 {"fxs", PSR_f | PSR_x | PSR_s},
14452 {"fxc", PSR_f | PSR_x | PSR_c},
14453 {"fcs", PSR_f | PSR_c | PSR_s},
14454 {"fcx", PSR_f | PSR_c | PSR_x},
14455 {"sfx", PSR_s | PSR_f | PSR_x},
14456 {"sfc", PSR_s | PSR_f | PSR_c},
14457 {"sxf", PSR_s | PSR_x | PSR_f},
14458 {"sxc", PSR_s | PSR_x | PSR_c},
14459 {"scf", PSR_s | PSR_c | PSR_f},
14460 {"scx", PSR_s | PSR_c | PSR_x},
14461 {"xfs", PSR_x | PSR_f | PSR_s},
14462 {"xfc", PSR_x | PSR_f | PSR_c},
14463 {"xsf", PSR_x | PSR_s | PSR_f},
14464 {"xsc", PSR_x | PSR_s | PSR_c},
14465 {"xcf", PSR_x | PSR_c | PSR_f},
14466 {"xcs", PSR_x | PSR_c | PSR_s},
14467 {"cfs", PSR_c | PSR_f | PSR_s},
14468 {"cfx", PSR_c | PSR_f | PSR_x},
14469 {"csf", PSR_c | PSR_s | PSR_f},
14470 {"csx", PSR_c | PSR_s | PSR_x},
14471 {"cxf", PSR_c | PSR_x | PSR_f},
14472 {"cxs", PSR_c | PSR_x | PSR_s},
14473 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14474 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14475 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14476 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14477 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14478 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14479 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14480 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14481 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14482 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14483 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14484 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14485 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14486 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14487 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14488 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14489 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14490 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14491 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14492 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14493 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14494 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14495 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14496 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14497 };
14498
14499 /* Table of V7M psr names. */
14500 static const struct asm_psr v7m_psrs[] =
14501 {
14502 {"apsr", 0 },
14503 {"iapsr", 1 },
14504 {"eapsr", 2 },
14505 {"psr", 3 },
14506 {"ipsr", 5 },
14507 {"epsr", 6 },
14508 {"iepsr", 7 },
14509 {"msp", 8 },
14510 {"psp", 9 },
14511 {"primask", 16},
14512 {"basepri", 17},
14513 {"basepri_max", 18},
14514 {"faultmask", 19},
14515 {"control", 20}
14516 };
14517
14518 /* Table of all shift-in-operand names. */
14519 static const struct asm_shift_name shift_names [] =
14520 {
14521 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14522 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14523 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14524 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14525 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14526 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14527 };
14528
14529 /* Table of all explicit relocation names. */
14530 #ifdef OBJ_ELF
14531 static struct reloc_entry reloc_names[] =
14532 {
14533 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14534 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14535 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14536 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14537 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14538 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14539 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14540 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14541 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14542 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14543 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14544 };
14545 #endif
14546
14547 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14548 static const struct asm_cond conds[] =
14549 {
14550 {"eq", 0x0},
14551 {"ne", 0x1},
14552 {"cs", 0x2}, {"hs", 0x2},
14553 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14554 {"mi", 0x4},
14555 {"pl", 0x5},
14556 {"vs", 0x6},
14557 {"vc", 0x7},
14558 {"hi", 0x8},
14559 {"ls", 0x9},
14560 {"ge", 0xa},
14561 {"lt", 0xb},
14562 {"gt", 0xc},
14563 {"le", 0xd},
14564 {"al", 0xe}
14565 };
14566
14567 static struct asm_barrier_opt barrier_opt_names[] =
14568 {
14569 { "sy", 0xf },
14570 { "un", 0x7 },
14571 { "st", 0xe },
14572 { "unst", 0x6 }
14573 };
14574
14575 /* Table of ARM-format instructions. */
14576
14577 /* Macros for gluing together operand strings. N.B. In all cases
14578 other than OPS0, the trailing OP_stop comes from default
14579 zero-initialization of the unspecified elements of the array. */
14580 #define OPS0() { OP_stop, }
14581 #define OPS1(a) { OP_##a, }
14582 #define OPS2(a,b) { OP_##a,OP_##b, }
14583 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14584 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14585 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14586 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14587
14588 /* These macros abstract out the exact format of the mnemonic table and
14589 save some repeated characters. */
14590
14591 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14592 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14593 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14594 THUMB_VARIANT, do_##ae, do_##te }
14595
14596 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14597 a T_MNEM_xyz enumerator. */
14598 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14599 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14600 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14601 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14602
14603 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14604 infix after the third character. */
14605 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14606 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14607 THUMB_VARIANT, do_##ae, do_##te }
14608 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14609 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14610 THUMB_VARIANT, do_##ae, do_##te }
14611 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14612 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14613 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14614 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14615 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14616 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14617 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14618 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14619
14620 /* Mnemonic with a conditional infix in an unusual place. Each and every conditional
14621 variant must appear explicitly in the opcode table. */
14622 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14623 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14624 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14625
14626 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14627 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14628 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14629 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14630 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14631 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14632 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14633 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14634 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14635 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14636 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14637 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14638 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14639 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14640 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14641 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14642 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14643 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14644 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14645 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14646
14647 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14648 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14649 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14650 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14651
14652 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14653 field is still 0xE. Many of the Thumb variants can be executed
14654 conditionally, so this is checked separately. */
14655 #define TUE(mnem, op, top, nops, ops, ae, te) \
14656 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14657 THUMB_VARIANT, do_##ae, do_##te }
14658
14659 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14660 condition code field. */
14661 #define TUF(mnem, op, top, nops, ops, ae, te) \
14662 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14663 THUMB_VARIANT, do_##ae, do_##te }
14664
14665 /* ARM-only variants of all the above. */
14666 #define CE(mnem, op, nops, ops, ae) \
14667 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14668
14669 #define C3(mnem, op, nops, ops, ae) \
14670 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14671
14672 /* Legacy mnemonics that always have conditional infix after the third
14673 character. */
14674 #define CL(mnem, op, nops, ops, ae) \
14675 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14676 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14677
14678 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14679 #define cCE(mnem, op, nops, ops, ae) \
14680 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14681
14682 /* Legacy coprocessor instructions where conditional infix and conditional
14683 suffix are ambiguous. For consistency this includes all FPA instructions,
14684 not just the potentially ambiguous ones. */
14685 #define cCL(mnem, op, nops, ops, ae) \
14686 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14687 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14688
14689 /* Coprocessor, takes either a suffix or a position-3 infix
14690 (for an FPA corner case). */
14691 #define C3E(mnem, op, nops, ops, ae) \
14692 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14693 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14694
14695 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14696 { #m1 #m2 #m3, OPS##nops ops, \
14697 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14698 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14699
14700 #define CM(m1, m2, op, nops, ops, ae) \
14701 xCM_(m1, , m2, op, nops, ops, ae), \
14702 xCM_(m1, eq, m2, op, nops, ops, ae), \
14703 xCM_(m1, ne, m2, op, nops, ops, ae), \
14704 xCM_(m1, cs, m2, op, nops, ops, ae), \
14705 xCM_(m1, hs, m2, op, nops, ops, ae), \
14706 xCM_(m1, cc, m2, op, nops, ops, ae), \
14707 xCM_(m1, ul, m2, op, nops, ops, ae), \
14708 xCM_(m1, lo, m2, op, nops, ops, ae), \
14709 xCM_(m1, mi, m2, op, nops, ops, ae), \
14710 xCM_(m1, pl, m2, op, nops, ops, ae), \
14711 xCM_(m1, vs, m2, op, nops, ops, ae), \
14712 xCM_(m1, vc, m2, op, nops, ops, ae), \
14713 xCM_(m1, hi, m2, op, nops, ops, ae), \
14714 xCM_(m1, ls, m2, op, nops, ops, ae), \
14715 xCM_(m1, ge, m2, op, nops, ops, ae), \
14716 xCM_(m1, lt, m2, op, nops, ops, ae), \
14717 xCM_(m1, gt, m2, op, nops, ops, ae), \
14718 xCM_(m1, le, m2, op, nops, ops, ae), \
14719 xCM_(m1, al, m2, op, nops, ops, ae)
14720
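/* For example, CM(smull,s, ...) in the table below expands to nineteen
   entries: "smulls" tagged OT_odd_infix_unc, plus "smulleqs", "smullnes" and
   so on, each tagged OT_odd_infix_0 + 5 so that opcode_lookup can find the
   condition at character index 5.  */
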
14721 #define UE(mnem, op, nops, ops, ae) \
14722 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14723
14724 #define UF(mnem, op, nops, ops, ae) \
14725 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14726
14727 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14728 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14729 use the same encoding function for each. */
14730 #define NUF(mnem, op, nops, ops, enc) \
14731 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14732 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14733
14734 /* Neon data processing, version which indirects through neon_enc_tab for
14735 the various overloaded versions of opcodes. */
14736 #define nUF(mnem, op, nops, ops, enc) \
14737 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14738 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14739
14740 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14741 version. */
14742 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14743 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14744 THUMB_VARIANT, do_##enc, do_##enc }
14745
14746 #define NCE(mnem, op, nops, ops, enc) \
14747 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14748
14749 #define NCEF(mnem, op, nops, ops, enc) \
14750 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14751
14752 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14753 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14754 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14755 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14756
14757 #define nCE(mnem, op, nops, ops, enc) \
14758 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14759
14760 #define nCEF(mnem, op, nops, ops, enc) \
14761 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14762
14763 #define do_0 0
14764
14765 /* Thumb-only, unconditional. */
14766 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14767
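/* As an illustration of these macros, the entry
     TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi)
   in the table below expands to
     { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00, ARM_VARIANT,
       THUMB_VARIANT, do_swi, do_t_swi },
   where ARM_VARIANT and THUMB_VARIANT take whatever values are #defined at
   that point in the table (&arm_ext_v1 and &arm_ext_v4t for this entry).  */
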
14768 static const struct asm_opcode insns[] =
14769 {
14770 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14771 #define THUMB_VARIANT &arm_ext_v4t
14772 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14773 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14774 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14775 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14776 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14777 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14778 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14779 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14780 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14781 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14782 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14783 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14784 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14785 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14786 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14787 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14788
14789 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14790 for setting PSR flag bits. They are obsolete in V6 and do not
14791 have Thumb equivalents. */
14792 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14793 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14794 CL(tstp, 110f000, 2, (RR, SH), cmp),
14795 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14796 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14797 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14798 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14799 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14800 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14801
14802 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14803 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14804 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14805 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14806
14807 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14808 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14809 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14810 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14811
14812 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14813 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14814 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14815 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14816 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14817 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14818
14819 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14820 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14821 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14822 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14823
14824 /* Pseudo ops. */
14825 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14826 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14827 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14828
14829 /* Thumb-compatibility pseudo ops. */
14830 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14831 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14832 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14833 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14834 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14835 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14836 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14837 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14838 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14839 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14840 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14841 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14842
14843 /* These may simplify to neg. */
14844 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14845 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14846
14847 #undef THUMB_VARIANT
14848 #define THUMB_VARIANT &arm_ext_v6
14849 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14850
14851 /* V1 instructions with no Thumb analogue prior to V6T2. */
14852 #undef THUMB_VARIANT
14853 #define THUMB_VARIANT &arm_ext_v6t2
14854 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14855 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14856 CL(teqp, 130f000, 2, (RR, SH), cmp),
14857
14858 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14859 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14860 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14861 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14862
14863 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14864 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14865
14866 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14867 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14868
14869 /* V1 instructions with no Thumb analogue at all. */
14870 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14871 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14872
14873 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14874 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14875 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14876 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14877 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14878 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14879 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14880 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14881
14882 #undef ARM_VARIANT
14883 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14884 #undef THUMB_VARIANT
14885 #define THUMB_VARIANT &arm_ext_v4t
14886 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14887 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14888
14889 #undef THUMB_VARIANT
14890 #define THUMB_VARIANT &arm_ext_v6t2
14891 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14892 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14893
14894 /* Generic coprocessor instructions. */
14895 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14896 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14897 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14898 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14899 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14900 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14901 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14902
14903 #undef ARM_VARIANT
14904 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14905 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14906 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14907
14908 #undef ARM_VARIANT
14909 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14910 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14911 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14912
14913 #undef ARM_VARIANT
14914 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14915 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14916 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14917 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14918 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14919 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14920 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14921 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14922 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14923
14924 #undef ARM_VARIANT
14925 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14926 #undef THUMB_VARIANT
14927 #define THUMB_VARIANT &arm_ext_v4t
14928 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14929 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14930 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14931 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14932 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14933 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14934
14935 #undef ARM_VARIANT
14936 #define ARM_VARIANT &arm_ext_v4t_5
14937 /* ARM Architecture 4T. */
14938 /* Note: bx (and blx) are required on V5, even if the processor does
14939 not support Thumb. */
14940 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14941
14942 #undef ARM_VARIANT
14943 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14944 #undef THUMB_VARIANT
14945 #define THUMB_VARIANT &arm_ext_v5t
14946 /* Note: blx has 2 variants; the .value coded here is for
14947 BLX(2). Only this variant has conditional execution. */
14948 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14949 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14950
14951 #undef THUMB_VARIANT
14952 #define THUMB_VARIANT &arm_ext_v6t2
14953 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14954 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14955 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14956 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14957 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14958 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14959 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14960 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14961
14962 #undef ARM_VARIANT
14963 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14964 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14965 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14966 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14967 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14968
14969 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14970 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14971
14972 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14973 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14974 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14975 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14976
14977 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14978 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14979 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14980 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14981
14982 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14983 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14984
14985 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14986 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14987 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14988 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14989
14990 #undef ARM_VARIANT
14991 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14992 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14993 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14994 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14995
14996 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14997 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14998
14999 #undef ARM_VARIANT
15000 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15001 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
15002
15003 #undef ARM_VARIANT
15004 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15005 #undef THUMB_VARIANT
15006 #define THUMB_VARIANT &arm_ext_v6
15007 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
15008 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
15009 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15010 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15011 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15012 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15013 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15014 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15015 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15016 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15017
15018 #undef THUMB_VARIANT
15019 #define THUMB_VARIANT &arm_ext_v6t2
15020 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15021 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15022 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15023
15024 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15025 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15026
15027 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15028 #undef THUMB_VARIANT
15029 #define THUMB_VARIANT &arm_ext_v6_notm
15030 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15031 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15032 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15033 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15034 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15035 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15036 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15037 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15038 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15039 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15040 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15041 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15042 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15043 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15044 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15045 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15046 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15047 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15048 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15049 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15050 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15051 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15052 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15053 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15054 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15055 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15056 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15057 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15058 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15059 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15060 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15061 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15062 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15063 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15064 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15065 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15066 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15067 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15068 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15069 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15070 UF(rfeib, 9900a00, 1, (RRw), rfe),
15071 UF(rfeda, 8100a00, 1, (RRw), rfe),
15072 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15073 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15074 UF(rfefa, 9900a00, 1, (RRw), rfe),
15075 UF(rfeea, 8100a00, 1, (RRw), rfe),
15076 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15077 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15078 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15079 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15080 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15081 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15082 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15083 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15084 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15085 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15086 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15087 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15088 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15089 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15090 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15091 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15092 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15093 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15094 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15095 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15096 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15097 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15098 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15099 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15100 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15101 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15102 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15103 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15104 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15105 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15106 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15107 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15108 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15109 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15110 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15111 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15112 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15113 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15114
15115 #undef ARM_VARIANT
15116 #define ARM_VARIANT &arm_ext_v6k
15117 #undef THUMB_VARIANT
15118 #define THUMB_VARIANT &arm_ext_v6k
15119 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15120 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15121 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15122 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15123
15124 #undef THUMB_VARIANT
15125 #define THUMB_VARIANT &arm_ext_v6_notm
15126 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15127 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15128
15129 #undef THUMB_VARIANT
15130 #define THUMB_VARIANT &arm_ext_v6t2
15131 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15132 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15133 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15134 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15135 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15136
15137 #undef ARM_VARIANT
15138 #define ARM_VARIANT &arm_ext_v6z
15139 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15140
15141 #undef ARM_VARIANT
15142 #define ARM_VARIANT &arm_ext_v6t2
15143 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15144 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15145 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15146 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15147
15148 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15149 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15150 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15151 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15152
15153 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15154 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15155 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15156 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15157
15158 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15159 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15160 /* ARM does not really have an IT instruction, so always allow it. */
15161 #undef ARM_VARIANT
15162 #define ARM_VARIANT &arm_ext_v1
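 /* In the Thumb opcodes below, the low nibble is the then/else mask
    for the given pattern; the t_it encoder fills in the condition
    (bits 4-7) from the COND operand.  */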
15163 TUE(it, 0, bf08, 1, (COND), it, t_it),
15164 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15165 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15166 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15167 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15168 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15169 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15170 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15171 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15172 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15173 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15174 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15175 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15176 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15177 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15178
15179  /* Thumb-2 only instructions.  */
15180 #undef ARM_VARIANT
15181 #define ARM_VARIANT NULL
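 /* A NULL ARM_VARIANT marks mnemonics with no ARM-state encoding at
    all, which is why the ARM opcode column below is 0.  */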
15182
15183 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15184 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15185 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15186 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15187
15188 /* Thumb-2 hardware division instructions (R and M profiles only). */
15189 #undef THUMB_VARIANT
15190 #define THUMB_VARIANT &arm_ext_div
15191 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15192 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15193
15194 /* ARM V7 instructions. */
15195 #undef ARM_VARIANT
15196 #define ARM_VARIANT &arm_ext_v7
15197 #undef THUMB_VARIANT
15198 #define THUMB_VARIANT &arm_ext_v7
15199 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15200 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15201 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15202 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15203 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15204
15205 #undef ARM_VARIANT
15206 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15207 cCE(wfs, e200110, 1, (RR), rd),
15208 cCE(rfs, e300110, 1, (RR), rd),
15209 cCE(wfc, e400110, 1, (RR), rd),
15210 cCE(rfc, e500110, 1, (RR), rd),
15211
15212 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15213 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15214 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15215 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15216
15217 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15218 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15219 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15220 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15221
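 /* The FPA arithmetic mnemonics below take a precision suffix (s, d
    or e for single, double or extended) and an optional rounding
    suffix (p, m or z for round towards plus infinity, minus infinity
    or zero; the default is round to nearest).  */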
15222 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15223 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15224 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15225 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15226 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15227 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15228 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15229 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15230 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15231 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15232 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15233 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15234
15235 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15236 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15237 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15238 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15239 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15240 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15241 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15242 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15243 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15244 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15245 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15246 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15247
15248 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15249 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15250 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15251 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15252 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15253 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15254 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15255 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15256 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15257 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15258 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15259 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15260
15261 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15262 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15263 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15264 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15265 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15266 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15267 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15268 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15269 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15270 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15271 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15272 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15273
15274 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15275 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15276 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15277 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15278 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15279 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15280 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15281 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15282 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15283 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15284 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15285 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15286
15287 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15288 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15289 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15290 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15291 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15292 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15293 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15294 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15295 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15296 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15297 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15298 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15299
15300 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15301 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15302 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15303 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15304 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15305 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15306 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15307 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15308 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15309 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15310 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15311 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15312
15313 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15314 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15315 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15316 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15317 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15318 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15319 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15320 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15321 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15322 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15323 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15324  cCL(expez,	e788160, 2, (RF, RF_IF),	      rd_rm),
15325
15326 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15327 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15328 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15329 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15330 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15331 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15332 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15333 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15334 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15335 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15336 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15337 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15338
15339 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15340 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15341 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15342 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15343 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15344 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15345 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15346 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15347 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15348 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15349 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15350 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15351
15352 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15353 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15354 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15355 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15356 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15357 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15358 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15359 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15360 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15361 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15362 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15363 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15364
15365 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15366 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15367 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15368 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15369 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15370 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15371 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15372 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15373 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15374 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15375 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15376 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15377
15378 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15379 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15380 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15381 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15382 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15383 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15384 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15385 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15386 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15387 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15388 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15389 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15390
15391 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15392 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15393 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15394 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15395 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15396 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15397 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15398 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15399 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15400 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15401 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15402 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15403
15404 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15405 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15406 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15407 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15408 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15409 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15410 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15411 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15412 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15413 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15414 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15415 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15416
15417 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15418 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15419 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15420 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15421 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15422 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15423 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15424 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15425 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15426 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15427 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15428 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15429
15430 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15431 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15432 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15433 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15434 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15435 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15436 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15437 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15438 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15439 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15440 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15441 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15442
15443 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15444 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15445 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15446 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15447 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15448 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15449 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15450 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15451 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15452 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15453 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15454 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15455
15456 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15457 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15458 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15459 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15460 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15461 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15462 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15463 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15464 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15465 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15466 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15467 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15468
15469 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15470 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15471 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15472 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15473 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15474 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15475 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15476 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15477 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15478 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15479 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15480 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15481
15482 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15483 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15484 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15485 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15486 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15487 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15488 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15489 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15490 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15491 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15492 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15493 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15494
15495 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15496 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15497 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15498 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15499 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15500 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15501 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15502 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15503 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15504 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15505 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15506 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15507
15508 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15509 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15510 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15511 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15512 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15513 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15514 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15515 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15516 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15517 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15518 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15519 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15520
15521 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15522 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15523 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15524 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15525 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15526 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15527 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15528 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15529 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15530 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15531 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15532 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15533
15534 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15535 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15536 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15537 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15538 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15539 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15540 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15541 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15542 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15543 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15544 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15545 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15546
15547 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15548 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15549 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15550 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15551 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15552 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15553 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15554 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15555 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15556 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15557 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15558 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15559
15560 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15561 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15562 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15563 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15564 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15565 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15566 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15567 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15568 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15569 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15570 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15571 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15572
15573 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15574 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15575 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15576 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15577 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15578 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15579 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15580 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15581 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15582 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15583 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15584 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15585
15586 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15587 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15588 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15589 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15590 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15591 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15592 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15593 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15594 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15595 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15596 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15597 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15598
15599 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15600 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15601 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15602 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15603
15604 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15605 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15606 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15607 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15608 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15609 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15610 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15611 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15612 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15613 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15614 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15615 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15616
15617 /* The implementation of the FIX instruction is broken on some
15618 assemblers, in that it accepts a precision specifier as well as a
15619 rounding specifier, despite the fact that this is meaningless.
15620 To be more compatible, we accept it as well, though of course it
15621 does not set any bits. */
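 /* Note, for example, that fixsp, fixdp and fixep below all reuse the
    fixp encoding (e100130); the precision letter contributes no bits.  */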
15622 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15623 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15624 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15625 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15626 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15627 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15628 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15629 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15630 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15631 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15632 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15633 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15634 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15635
15636  /* Instructions that were new with the real FPA; call them V2.  */
15637 #undef ARM_VARIANT
15638 #define ARM_VARIANT &fpu_fpa_ext_v2
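 /* LFM and SFM transfer a block of one to four FPA registers; the
    I4b operand is the register count.  */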
15639 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15640 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15641 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15642 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15643 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15644 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15645
15646 #undef ARM_VARIANT
15647 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15648 /* Moves and type conversions. */
15649 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15650 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15651 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15652 cCE(fmstat, ef1fa10, 0, (), noargs),
15653 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15654 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15655 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15656 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15657 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15658 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15659 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15660 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15661
15662 /* Memory operations. */
15663 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15664 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15665 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15666 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15667 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15668 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15669 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15670 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15671 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15672 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15673 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15674 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15675 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15676 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15677 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15678 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15679 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15680 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15681
15682 /* Monadic operations. */
15683 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15684 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15685 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15686
15687 /* Dyadic operations. */
15688 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15689 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15690 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15691 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15692 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15693 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15694 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15695 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15696 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15697
15698 /* Comparisons. */
15699 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15700 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15701 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15702 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15703
15704 #undef ARM_VARIANT
15705 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15706 /* Moves and type conversions. */
15707 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15708 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15709 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15710 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15711 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15712 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15713 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15714 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15715 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15716 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15717 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15718 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15719 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15720
15721 /* Memory operations. */
15722 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15723 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15724 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15725 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15726 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15727 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15728 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15729 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15730 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15731 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15732
15733 /* Monadic operations. */
15734 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15735 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15736 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15737
15738 /* Dyadic operations. */
15739 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15740 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15741 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15742 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15743 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15744 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15745 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15746 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15747 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15748
15749 /* Comparisons. */
15750 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15751 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15752 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15753 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15754
15755 #undef ARM_VARIANT
15756 #define ARM_VARIANT &fpu_vfp_ext_v2
15757 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15758 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15759 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15760 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15761
15762 /* Instructions which may belong to either the Neon or VFP instruction sets.
15763 Individual encoder functions perform additional architecture checks. */
15764 #undef ARM_VARIANT
15765 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15766 #undef THUMB_VARIANT
15767 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15768 /* These mnemonics are unique to VFP. */
15769 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15770 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15771 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15772 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15773 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15774 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15775 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15776 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15777 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15778 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15779
15780 /* Mnemonics shared by Neon and VFP. */
15781 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15782 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15783 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15784
15785 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15786 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15787
15788 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15789 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15790
15791 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15792 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15793 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15794 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15795 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15796 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15797 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15798 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15799
15800 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15801
15802 /* NOTE: All VMOV encoding is special-cased! */
15803 NCE(vmov, 0, 1, (VMOV), neon_mov),
15804 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15805
15806 #undef THUMB_VARIANT
15807 #define THUMB_VARIANT &fpu_neon_ext_v1
15808 #undef ARM_VARIANT
15809 #define ARM_VARIANT &fpu_neon_ext_v1
15810 /* Data processing with three registers of the same length. */
15811  /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
15812 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15813 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15814 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15815 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15816 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15817 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15818 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15819 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15820  /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
15821 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15822 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15823 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15824 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15825 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15826 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15827 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15828 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15829 /* If not immediate, fall back to neon_dyadic_i64_su.
15830 shl_imm should accept I8 I16 I32 I64,
15831 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15832 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15833 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15834 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15835 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15836 /* Logic ops, types optional & ignored. */
15837 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15838 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15839 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15840 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15841 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15842 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15843 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15844 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15845 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15846 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15847 /* Bitfield ops, untyped. */
15848 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15849 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15850 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15851 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15852 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15853 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15854 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15855 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15856 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15857 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15858 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15859 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15860 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15861 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15862 back to neon_dyadic_if_su. */
15863 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15864 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15865 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15866 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15867 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15868 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15869 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15870 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15871 /* Comparison. Type I8 I16 I32 F32. */
15872 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15873 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15874 /* As above, D registers only. */
15875 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15876 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15877 /* Int and float variants, signedness unimportant. */
15878 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15879 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15880 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15881 /* Add/sub take types I8 I16 I32 I64 F32. */
15882 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15883 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15884 /* vtst takes sizes 8, 16, 32. */
15885 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15886 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15887 /* VMUL takes I8 I16 I32 F32 P8. */
15888 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15889 /* VQD{R}MULH takes S16 S32. */
15890 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15891 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15892 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15893 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15894 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15895 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15896 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15897 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15898 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15899 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15900 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15901 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15902 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15903 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15904 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15905 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15906
15907 /* Two address, int/float. Types S8 S16 S32 F32. */
15908 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15909 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15910
15911 /* Data processing with two registers and a shift amount. */
15912 /* Right shifts, and variants with rounding.
15913 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15914 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15915 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15916 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15917 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15918 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15919 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15920 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15921 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15922 /* Shift and insert. Sizes accepted 8 16 32 64. */
15923 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15924 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15925 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15926 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15927 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15928 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15929 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15930 /* Right shift immediate, saturating & narrowing, with rounding variants.
15931 Types accepted S16 S32 S64 U16 U32 U64. */
15932 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15933 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15934 /* As above, unsigned. Types accepted S16 S32 S64. */
15935 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15936 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15937 /* Right shift narrowing. Types accepted I16 I32 I64. */
15938 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15939 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15940 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15941 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15942 /* CVT with optional immediate for fixed-point variant. */
15943 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15944
15945 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15946 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15947
15948 /* Data processing, three registers of different lengths. */
15949 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15950 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15951 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15952 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15953 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15954 /* If not scalar, fall back to neon_dyadic_long.
15955 Vector types as above, scalar types S16 S32 U16 U32. */
15956 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15957 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15958 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15959 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15960 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15961 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15962 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15963 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15964 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15965 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15966 /* Saturating doubling multiplies. Types S16 S32. */
15967 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15968 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15969 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15970 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15971 S16 S32 U16 U32. */
15972 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15973
15974 /* Extract. Size 8. */
15975 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
15976 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
15977
15978 /* Two registers, miscellaneous. */
15979 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15980 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15981 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15982 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15983 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15984 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15985 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15986 /* Vector replicate. Sizes 8 16 32. */
15987 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15988 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15989 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15990 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15991 /* VMOVN. Types I16 I32 I64. */
15992 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15993 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15994 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15995 /* VQMOVUN. Types S16 S32 S64. */
15996 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15997 /* VZIP / VUZP. Sizes 8 16 32. */
15998 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15999 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
16000 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
16001 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
16002 /* VQABS / VQNEG. Types S8 S16 S32. */
16003 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16004 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
16005 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16006 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
16007 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
16008 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
16009 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
16010 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
16011 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
16012 /* Reciprocal estimates. Types U32 F32. */
16013 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16014 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16015 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16016 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16017 /* VCLS. Types S8 S16 S32. */
16018 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16019 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16020 /* VCLZ. Types I8 I16 I32. */
16021 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16022 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16023 /* VCNT. Size 8. */
16024 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16025 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16026 /* Two address, untyped. */
16027 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16028 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16029 /* VTRN. Sizes 8 16 32. */
16030 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16031 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16032
16033 /* Table lookup. Size 8. */
16034 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16035 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16036
16037 #undef THUMB_VARIANT
16038 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16039 #undef ARM_VARIANT
16040 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16041 /* Neon element/structure load/store. */
16042 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16043 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16044 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16045 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16046 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16047 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16048 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16049 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16050
16051 #undef THUMB_VARIANT
16052 #define THUMB_VARIANT &fpu_vfp_ext_v3
16053 #undef ARM_VARIANT
16054 #define ARM_VARIANT &fpu_vfp_ext_v3
16055 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16056 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16057 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16058 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16059 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16060 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16061 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16062 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16063 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16064 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16065 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16066 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16067 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16068 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16069 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16070 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16071 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16072 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16073
16074 #undef THUMB_VARIANT
16075 #undef ARM_VARIANT
16076 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16077 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16078 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16079 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16080 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16081 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16082 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16083 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16084 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16085
16086 #undef ARM_VARIANT
16087 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16088 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16089 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16090 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16091 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16092 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16093 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16094 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16095 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16096 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16097 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16098 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16099 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16100 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16101 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16102 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16103 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16104 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16105 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16106 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16107 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16108 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16109 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16110 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16111 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16112 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16113 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16114 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16115 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16116 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16117 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16118 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16119 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16120 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16121 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16122 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16123 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16124 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16125 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16126 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16127 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16128 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16129 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16130 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16131 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16132 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16133 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16134 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16135 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16136 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16137 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16138 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16139 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16140 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16141 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16142 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16143 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16144 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16145 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16146 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16147 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16148 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16149 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16150 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16151 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16152 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16153 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16154 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16155 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16156 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16157 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16158 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16159 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16160 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16161 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16162 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16163 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16164 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16165 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16166 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16167 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16168 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16169 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16170 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16171 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16172 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16173 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16174 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16175 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16176 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16177 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16178 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16179 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16180 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16181 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16182 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16183 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16184 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16185 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16186 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16187 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16188 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16189 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16190 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16191 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16192 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16193 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16194 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16195 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16196 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16197 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16198 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16199 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16200 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16201 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16202 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16203 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16204 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16205 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16206 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16207 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16208 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16209 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16210 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16211 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16212 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16213 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16214 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16215 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16216 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16217 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16218 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16219 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16220 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16221 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16222 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16223 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16224 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16225 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16226 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16227 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16228 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16229 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16230 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16231 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16232 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16233 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16234 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16235 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16236 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16237 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16238 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16239 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16240 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16241 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16242 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16243 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16244 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16245 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16246 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16247 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16248 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16249 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16250
16251 #undef ARM_VARIANT
16252 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16253 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16254 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16255 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16256 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16257 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16258 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16259 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16260 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16261 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16262 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16263 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16264 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16265 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16266 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16267 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16268 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16269 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16270 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16271 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16272 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16273 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16274 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16275 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16276 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16277 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16278 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16279 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16280 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16281 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16282 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16283 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16284 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16285 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16286 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16287 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16288 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16289 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16290 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16291 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16292 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16293 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16294 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16295 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16296 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16297 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16298 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16299 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16300 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16301 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16302 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16303 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16304 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16305 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16306 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16307 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16308 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16309 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16310
16311 #undef ARM_VARIANT
16312 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16313 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16314 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16315 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16316 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16317 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16318 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16319 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16320 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16321 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16322 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16323 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16324 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16325 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16326 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16327 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16328 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16329 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16330 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16331 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16332 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16333 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16334 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16335 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16336 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16337 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16338 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16339 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16340 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16341 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16342 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16343 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16344 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16345 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16346 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16347 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16348 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16349 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16350 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16351 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16352 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16353 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16354 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16355 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16356 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16357 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16358 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16359 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16360 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16361 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16362 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16363 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16364 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16365 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16366 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16367 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16368 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16369 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16370 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16371 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16372 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16373 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16374 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16375 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16376 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16377 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16378 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16379 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16380 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16381 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16382 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16383 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16384 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16385 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16386 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16387 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16388 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16389 };
16390 #undef ARM_VARIANT
16391 #undef THUMB_VARIANT
16392 #undef TCE
16393 #undef TCM
16394 #undef TUE
16395 #undef TUF
16396 #undef TCC
16397 #undef cCE
16398 #undef cCL
16399 #undef C3E
16400 #undef CE
16401 #undef CM
16402 #undef UE
16403 #undef UF
16404 #undef UT
16405 #undef NUF
16406 #undef nUF
16407 #undef NCE
16408 #undef nCE
16409 #undef OPS0
16410 #undef OPS1
16411 #undef OPS2
16412 #undef OPS3
16413 #undef OPS4
16414 #undef OPS5
16415 #undef OPS6
16416 #undef do_0
16417 \f
16418 /* MD interface: bits in the object file. */
16419
16420 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16421    for use in the a.out file, and store them in the array pointed to by buf.
16422    This knows about the endian-ness of the target machine and does
16423    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
16424    2 (short) and 4 (long).  Floating point numbers are put out as a series of
16425    LITTLENUMS (shorts, here at least).  */
16426
16427 void
16428 md_number_to_chars (char * buf, valueT val, int n)
16429 {
16430 if (target_big_endian)
16431 number_to_chars_bigendian (buf, val, n);
16432 else
16433 number_to_chars_littleendian (buf, val, n);
16434 }
16435
16436 static valueT
16437 md_chars_to_number (char * buf, int n)
16438 {
16439 valueT result = 0;
16440 unsigned char * where = (unsigned char *) buf;
16441
16442 if (target_big_endian)
16443 {
16444 while (n--)
16445 {
16446 result <<= 8;
16447 result |= (*where++ & 255);
16448 }
16449 }
16450 else
16451 {
16452 while (n--)
16453 {
16454 result <<= 8;
16455 result |= (where[n] & 255);
16456 }
16457 }
16458
16459 return result;
16460 }
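
/* For illustration: on a little-endian target, md_number_to_chars (buf,
   0x12345678, 4) stores the bytes 0x78 0x56 0x34 0x12, and
   md_chars_to_number (buf, 4) reads them back as 0x12345678; a big-endian
   target stores and reads the bytes in the opposite order.  */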
16461
16462 /* MD interface: Sections. */
16463
16464 /* Estimate the size of a frag before relaxing. Assume everything fits in
16465 2 bytes. */
16466
16467 int
16468 md_estimate_size_before_relax (fragS * fragp,
16469 segT segtype ATTRIBUTE_UNUSED)
16470 {
16471 fragp->fr_var = 2;
16472 return 2;
16473 }
16474
16475 /* Convert a machine dependent frag. */
16476
16477 void
16478 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16479 {
16480 unsigned long insn;
16481 unsigned long old_op;
16482 char *buf;
16483 expressionS exp;
16484 fixS *fixp;
16485 int reloc_type;
16486 int pc_rel;
16487 int opcode;
16488
16489 buf = fragp->fr_literal + fragp->fr_fix;
16490
16491 old_op = bfd_get_16(abfd, buf);
16492 if (fragp->fr_symbol) {
16493 exp.X_op = O_symbol;
16494 exp.X_add_symbol = fragp->fr_symbol;
16495 } else {
16496 exp.X_op = O_constant;
16497 }
16498 exp.X_add_number = fragp->fr_offset;
16499 opcode = fragp->fr_subtype;
16500 switch (opcode)
16501 {
16502 case T_MNEM_ldr_pc:
16503 case T_MNEM_ldr_pc2:
16504 case T_MNEM_ldr_sp:
16505 case T_MNEM_str_sp:
16506 case T_MNEM_ldr:
16507 case T_MNEM_ldrb:
16508 case T_MNEM_ldrh:
16509 case T_MNEM_str:
16510 case T_MNEM_strb:
16511 case T_MNEM_strh:
16512 if (fragp->fr_var == 4)
16513 {
16514 insn = THUMB_OP32(opcode);
16515 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16516 {
16517 insn |= (old_op & 0x700) << 4;
16518 }
16519 else
16520 {
16521 insn |= (old_op & 7) << 12;
16522 insn |= (old_op & 0x38) << 13;
16523 }
16524 insn |= 0x00000c00;
16525 put_thumb32_insn (buf, insn);
16526 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16527 }
16528 else
16529 {
16530 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16531 }
16532 pc_rel = (opcode == T_MNEM_ldr_pc2);
16533 break;
16534 case T_MNEM_adr:
16535 if (fragp->fr_var == 4)
16536 {
16537 insn = THUMB_OP32 (opcode);
16538 insn |= (old_op & 0xf0) << 4;
16539 put_thumb32_insn (buf, insn);
16540 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16541 }
16542 else
16543 {
16544 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16545 exp.X_add_number -= 4;
16546 }
16547 pc_rel = 1;
16548 break;
16549 case T_MNEM_mov:
16550 case T_MNEM_movs:
16551 case T_MNEM_cmp:
16552 case T_MNEM_cmn:
16553 if (fragp->fr_var == 4)
16554 {
16555 int r0off = (opcode == T_MNEM_mov
16556 || opcode == T_MNEM_movs) ? 0 : 8;
16557 insn = THUMB_OP32 (opcode);
16558 insn = (insn & 0xe1ffffff) | 0x10000000;
16559 insn |= (old_op & 0x700) << r0off;
16560 put_thumb32_insn (buf, insn);
16561 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16562 }
16563 else
16564 {
16565 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16566 }
16567 pc_rel = 0;
16568 break;
16569 case T_MNEM_b:
16570 if (fragp->fr_var == 4)
16571 {
16572 insn = THUMB_OP32(opcode);
16573 put_thumb32_insn (buf, insn);
16574 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16575 }
16576 else
16577 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16578 pc_rel = 1;
16579 break;
16580 case T_MNEM_bcond:
16581 if (fragp->fr_var == 4)
16582 {
16583 insn = THUMB_OP32(opcode);
16584 insn |= (old_op & 0xf00) << 14;
16585 put_thumb32_insn (buf, insn);
16586 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16587 }
16588 else
16589 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16590 pc_rel = 1;
16591 break;
16592 case T_MNEM_add_sp:
16593 case T_MNEM_add_pc:
16594 case T_MNEM_inc_sp:
16595 case T_MNEM_dec_sp:
16596 if (fragp->fr_var == 4)
16597 {
16598 /* ??? Choose between add and addw. */
16599 insn = THUMB_OP32 (opcode);
16600 insn |= (old_op & 0xf0) << 4;
16601 put_thumb32_insn (buf, insn);
16602 if (opcode == T_MNEM_add_pc)
16603 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16604 else
16605 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16606 }
16607 else
16608 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16609 pc_rel = 0;
16610 break;
16611
16612 case T_MNEM_addi:
16613 case T_MNEM_addis:
16614 case T_MNEM_subi:
16615 case T_MNEM_subis:
16616 if (fragp->fr_var == 4)
16617 {
16618 insn = THUMB_OP32 (opcode);
16619 insn |= (old_op & 0xf0) << 4;
16620 insn |= (old_op & 0xf) << 16;
16621 put_thumb32_insn (buf, insn);
16622 if (insn & (1 << 20))
16623 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16624 else
16625 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16626 }
16627 else
16628 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16629 pc_rel = 0;
16630 break;
16631 default:
16632 abort();
16633 }
16634 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16635 reloc_type);
16636 fixp->fx_file = fragp->fr_file;
16637 fixp->fx_line = fragp->fr_line;
16638 fragp->fr_fix += fragp->fr_var;
16639 }
16640
16641 /* Return the size of a relaxable immediate operand instruction.
16642 SHIFT and SIZE specify the form of the allowable immediate. */
16643 static int
16644 relax_immediate (fragS *fragp, int size, int shift)
16645 {
16646 offsetT offset;
16647 offsetT mask;
16648 offsetT low;
16649
16650 /* ??? Should be able to do better than this. */
16651 if (fragp->fr_symbol)
16652 return 4;
16653
16654 low = (1 << shift) - 1;
16655 mask = (1 << (shift + size)) - (1 << shift);
16656 offset = fragp->fr_offset;
16657 /* Force misaligned offsets to 32-bit variant. */
16658 if (offset & low)
16659 return 4;
16660 if (offset & ~mask)
16661 return 4;
16662 return 2;
16663 }
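
/* For illustration: Thumb "ldr Rt, [Rn, #imm]" is relaxed with SIZE = 5 and
   SHIFT = 2 (a 5-bit immediate scaled by 4), so constant offsets that are
   multiples of 4 in the range 0..124 keep the 16-bit encoding, while any
   other offset, or a symbolic one, forces the 32-bit encoding.  */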
16664
16665 /* Get the address of a symbol during relaxation. */
16666 static addressT
16667 relaxed_symbol_addr(fragS *fragp, long stretch)
16668 {
16669 fragS *sym_frag;
16670 addressT addr;
16671 symbolS *sym;
16672
16673 sym = fragp->fr_symbol;
16674 sym_frag = symbol_get_frag (sym);
16675 know (S_GET_SEGMENT (sym) != absolute_section
16676 || sym_frag == &zero_address_frag);
16677 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16678
16679   /* If the symbol's frag has yet to be reached on this relaxation pass,
16680      assume it will move by STRETCH just as we did.  If this is not so,
16681      it will be because some frag in between grows, and that will force
16682      another pass.  */
16683
16684 if (stretch != 0
16685 && sym_frag->relax_marker != fragp->relax_marker)
16686 addr += stretch;
16687
16688 return addr;
16689 }
16690
16691 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16692 load. */
16693 static int
16694 relax_adr (fragS *fragp, asection *sec, long stretch)
16695 {
16696 addressT addr;
16697 offsetT val;
16698
16699 /* Assume worst case for symbols not known to be in the same section. */
16700 if (!S_IS_DEFINED(fragp->fr_symbol)
16701 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16702 return 4;
16703
16704 val = relaxed_symbol_addr(fragp, stretch);
16705 addr = fragp->fr_address + fragp->fr_fix;
16706 addr = (addr + 4) & ~3;
16707 /* Force misaligned targets to 32-bit variant. */
16708 if (val & 3)
16709 return 4;
16710 val -= addr;
16711 if (val < 0 || val > 1020)
16712 return 4;
16713 return 2;
16714 }
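
/* For illustration: "adr rd, label" keeps its 16-bit encoding only when
   label is word-aligned and lies at most 1020 bytes after the aligned
   Thumb PC value, i.e. (insn address + 4) & ~3; any other target is
   relaxed to the 32-bit form.  */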
16715
16716 /* Return the size of a relaxable add/sub immediate instruction. */
16717 static int
16718 relax_addsub (fragS *fragp, asection *sec)
16719 {
16720 char *buf;
16721 int op;
16722
16723 buf = fragp->fr_literal + fragp->fr_fix;
16724 op = bfd_get_16(sec->owner, buf);
16725 if ((op & 0xf) == ((op >> 4) & 0xf))
16726 return relax_immediate (fragp, 8, 0);
16727 else
16728 return relax_immediate (fragp, 3, 0);
16729 }
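
/* The nibble comparison above asks whether the placeholder instruction's
   source and destination registers are the same (the T_MNEM_addi case in
   md_convert_frag extracts Rn and Rd from those two nibbles).  Only then
   can the 16-bit "add/sub Rd, #imm8" form be used; otherwise the narrow
   alternative is "add/sub Rd, Rn, #imm3".  */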
16730
16731
16732 /* Return the size of a relaxable branch instruction. BITS is the
16733 size of the offset field in the narrow instruction. */
16734
16735 static int
16736 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16737 {
16738 addressT addr;
16739 offsetT val;
16740 offsetT limit;
16741
16742 /* Assume worst case for symbols not known to be in the same section. */
16743 if (!S_IS_DEFINED(fragp->fr_symbol)
16744 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16745 return 4;
16746
16747 val = relaxed_symbol_addr(fragp, stretch);
16748 addr = fragp->fr_address + fragp->fr_fix + 4;
16749 val -= addr;
16750
16751   /* The offset field holds a signed count of halfwords, so the reachable
	 byte range is roughly +/- (1 << bits).  */
16752 limit = 1 << bits;
16753 if (val >= limit || val < -limit)
16754 return 4;
16755 return 2;
16756 }
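
/* For illustration: unconditional Thumb "b" is relaxed with BITS = 11, so
   the 16-bit form survives only while the target stays within roughly
   +/- 2 KiB of the branch; conditional branches use BITS = 8, i.e. roughly
   +/- 256 bytes.  Anything further, or in another section, becomes the
   32-bit encoding.  */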
16757
16758
16759 /* Relax a machine dependent frag. This returns the amount by which
16760 the current size of the frag should change. */
16761
16762 int
16763 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
16764 {
16765 int oldsize;
16766 int newsize;
16767
16768 oldsize = fragp->fr_var;
16769 switch (fragp->fr_subtype)
16770 {
16771 case T_MNEM_ldr_pc2:
16772 newsize = relax_adr(fragp, sec, stretch);
16773 break;
16774 case T_MNEM_ldr_pc:
16775 case T_MNEM_ldr_sp:
16776 case T_MNEM_str_sp:
16777 newsize = relax_immediate(fragp, 8, 2);
16778 break;
16779 case T_MNEM_ldr:
16780 case T_MNEM_str:
16781 newsize = relax_immediate(fragp, 5, 2);
16782 break;
16783 case T_MNEM_ldrh:
16784 case T_MNEM_strh:
16785 newsize = relax_immediate(fragp, 5, 1);
16786 break;
16787 case T_MNEM_ldrb:
16788 case T_MNEM_strb:
16789 newsize = relax_immediate(fragp, 5, 0);
16790 break;
16791 case T_MNEM_adr:
16792 newsize = relax_adr(fragp, sec, stretch);
16793 break;
16794 case T_MNEM_mov:
16795 case T_MNEM_movs:
16796 case T_MNEM_cmp:
16797 case T_MNEM_cmn:
16798 newsize = relax_immediate(fragp, 8, 0);
16799 break;
16800 case T_MNEM_b:
16801 newsize = relax_branch(fragp, sec, 11, stretch);
16802 break;
16803 case T_MNEM_bcond:
16804 newsize = relax_branch(fragp, sec, 8, stretch);
16805 break;
16806 case T_MNEM_add_sp:
16807 case T_MNEM_add_pc:
16808 newsize = relax_immediate (fragp, 8, 2);
16809 break;
16810 case T_MNEM_inc_sp:
16811 case T_MNEM_dec_sp:
16812 newsize = relax_immediate (fragp, 7, 2);
16813 break;
16814 case T_MNEM_addi:
16815 case T_MNEM_addis:
16816 case T_MNEM_subi:
16817 case T_MNEM_subis:
16818 newsize = relax_addsub (fragp, sec);
16819 break;
16820 default:
16821 abort();
16822 }
16823
16824 fragp->fr_var = newsize;
16825 /* Freeze wide instructions that are at or before the same location as
16826 in the previous pass. This avoids infinite loops.
16827      Don't freeze them unconditionally because targets may be artificially
16828      misaligned by the expansion of preceding frags.  */
16829 if (stretch <= 0 && newsize > 2)
16830 {
16831 md_convert_frag (sec->owner, sec, fragp);
16832 frag_wane(fragp);
16833 }
16834
16835 return newsize - oldsize;
16836 }
16837
16838 /* Round up a section size to the appropriate boundary. */
16839
16840 valueT
16841 md_section_align (segT segment ATTRIBUTE_UNUSED,
16842 valueT size)
16843 {
16844 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16845 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16846 {
16847 /* For a.out, force the section size to be aligned. If we don't do
16848 this, BFD will align it for us, but it will not write out the
16849 final bytes of the section. This may be a bug in BFD, but it is
16850 easier to fix it here since that is how the other a.out targets
16851 work. */
16852 int align;
16853
16854 align = bfd_get_section_alignment (stdoutput, segment);
16855 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16856 }
16857 #endif
16858
16859 return size;
16860 }
16861
16862 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16863 of an rs_align_code fragment. */
16864
16865 void
16866 arm_handle_align (fragS * fragP)
16867 {
16868 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16869 static char const thumb_noop[2] = { 0xc0, 0x46 };
16870 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16871 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
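  /* These byte sequences encode the traditional NOPs: "mov r0, r0"
     (0xe1a00000) for ARM and "mov r8, r8" (0x46c0) for Thumb, stored in
     little-endian and big-endian byte order.  */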
16872
16873 int bytes, fix, noop_size;
16874 char * p;
16875 const char * noop;
16876
16877 if (fragP->fr_type != rs_align_code)
16878 return;
16879
16880 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16881 p = fragP->fr_literal + fragP->fr_fix;
16882 fix = 0;
16883
16884 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16885 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16886
16887 if (fragP->tc_frag_data)
16888 {
16889 if (target_big_endian)
16890 noop = thumb_bigend_noop;
16891 else
16892 noop = thumb_noop;
16893 noop_size = sizeof (thumb_noop);
16894 }
16895 else
16896 {
16897 if (target_big_endian)
16898 noop = arm_bigend_noop;
16899 else
16900 noop = arm_noop;
16901 noop_size = sizeof (arm_noop);
16902 }
16903
16904 if (bytes & (noop_size - 1))
16905 {
16906 fix = bytes & (noop_size - 1);
16907 memset (p, 0, fix);
16908 p += fix;
16909 bytes -= fix;
16910 }
16911
16912 while (bytes >= noop_size)
16913 {
16914 memcpy (p, noop, noop_size);
16915 p += noop_size;
16916 bytes -= noop_size;
16917 fix += noop_size;
16918 }
16919
16920 fragP->fr_fix += fix;
16921 fragP->fr_var = noop_size;
16922 }
16923
16924 /* Called from md_do_align. Used to create an alignment
16925 frag in a code section. */
16926
16927 void
16928 arm_frag_align_code (int n, int max)
16929 {
16930 char * p;
16931
16932 /* We assume that there will never be a requirement
16933 to support alignments greater than 32 bytes. */
16934 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16935 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16936
16937 p = frag_var (rs_align_code,
16938 MAX_MEM_FOR_RS_ALIGN_CODE,
16939 1,
16940 (relax_substateT) max,
16941 (symbolS *) NULL,
16942 (offsetT) n,
16943 (char *) NULL);
16944 *p = 0;
16945 }
16946
16947 /* Perform target specific initialisation of a frag. */
16948
16949 void
16950 arm_init_frag (fragS * fragP)
16951 {
16952 /* Record whether this frag is in an ARM or a THUMB area. */
16953 fragP->tc_frag_data = thumb_mode;
16954 }
16955
16956 #ifdef OBJ_ELF
16957 /* When we change sections we need to issue a new mapping symbol. */
16958
16959 void
16960 arm_elf_change_section (void)
16961 {
16962 flagword flags;
16963 segment_info_type *seginfo;
16964
16965 /* Link an unlinked unwind index table section to the .text section. */
16966 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16967 && elf_linked_to_section (now_seg) == NULL)
16968 elf_linked_to_section (now_seg) = text_section;
16969
16970 if (!SEG_NORMAL (now_seg))
16971 return;
16972
16973 flags = bfd_get_section_flags (stdoutput, now_seg);
16974
16975 /* We can ignore sections that only contain debug info. */
16976 if ((flags & SEC_ALLOC) == 0)
16977 return;
16978
16979 seginfo = seg_info (now_seg);
16980 mapstate = seginfo->tc_segment_info_data.mapstate;
16981 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16982 }
16983
16984 int
16985 arm_elf_section_type (const char * str, size_t len)
16986 {
16987 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16988 return SHT_ARM_EXIDX;
16989
16990 return -1;
16991 }
16992 \f
16993 /* Code to deal with unwinding tables. */
16994
16995 static void add_unwind_adjustsp (offsetT);
16996
16997 /* Generate any deferred unwind frame offset.  */
16998
16999 static void
17000 flush_pending_unwind (void)
17001 {
17002 offsetT offset;
17003
17004 offset = unwind.pending_offset;
17005 unwind.pending_offset = 0;
17006 if (offset != 0)
17007 add_unwind_adjustsp (offset);
17008 }
17009
17010 /* Add an opcode to this list for this function. Two-byte opcodes should
17011 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17012 order. */
17013
17014 static void
17015 add_unwind_opcode (valueT op, int length)
17016 {
17017 /* Add any deferred stack adjustment. */
17018 if (unwind.pending_offset)
17019 flush_pending_unwind ();
17020
17021 unwind.sp_restored = 0;
17022
17023 if (unwind.opcode_count + length > unwind.opcode_alloc)
17024 {
17025 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17026 if (unwind.opcodes)
17027 unwind.opcodes = xrealloc (unwind.opcodes,
17028 unwind.opcode_alloc);
17029 else
17030 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17031 }
17032 while (length > 0)
17033 {
17034 length--;
17035 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17036 op >>= 8;
17037 unwind.opcode_count++;
17038 }
17039 }
17040
17041 /* Add unwind opcodes to adjust the stack pointer. */
17042
17043 static void
17044 add_unwind_adjustsp (offsetT offset)
17045 {
17046 valueT op;
17047
17048 if (offset > 0x200)
17049 {
17050 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17051 char bytes[5];
17052 int n;
17053 valueT o;
17054
17055 /* Long form: 0xb2, uleb128. */
17056 /* This might not fit in a word so add the individual bytes,
17057 remembering the list is built in reverse order. */
17058 o = (valueT) ((offset - 0x204) >> 2);
17059 if (o == 0)
17060 add_unwind_opcode (0, 1);
17061
17062 /* Calculate the uleb128 encoding of the offset. */
17063 n = 0;
17064 while (o)
17065 {
17066 bytes[n] = o & 0x7f;
17067 o >>= 7;
17068 if (o)
17069 bytes[n] |= 0x80;
17070 n++;
17071 }
17072 /* Add the insn. */
17073 for (; n; n--)
17074 add_unwind_opcode (bytes[n - 1], 1);
17075 add_unwind_opcode (0xb2, 1);
17076 }
17077 else if (offset > 0x100)
17078 {
17079 /* Two short opcodes. */
17080 add_unwind_opcode (0x3f, 1);
17081 op = (offset - 0x104) >> 2;
17082 add_unwind_opcode (op, 1);
17083 }
17084 else if (offset > 0)
17085 {
17086 /* Short opcode. */
17087 op = (offset - 4) >> 2;
17088 add_unwind_opcode (op, 1);
17089 }
17090 else if (offset < 0)
17091 {
17092 offset = -offset;
17093 while (offset > 0x100)
17094 {
17095 add_unwind_opcode (0x7f, 1);
17096 offset -= 0x100;
17097 }
17098 op = ((offset - 4) >> 2) | 0x40;
17099 add_unwind_opcode (op, 1);
17100 }
17101 }
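
/* Worked examples of the opcodes produced above, using the ARM EHABI
   meanings (these particular numbers are only for illustration):
     offset 0x20  -> opcode 0x07           (vsp += (0x07 << 2) + 4 = 0x20)
     offset 0x120 -> opcodes 0x3f and 0x07 (0x100 + 0x20)
     offset 0x208 -> opcodes 0xb2 0x01     (vsp += 0x204 + (0x01 << 2) = 0x208)
   Since add_unwind_opcode builds the list in reverse order, the bytes end up
   in the emitted opcode stream in the opposite order to the calls above.  */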
17102
17103 /* Finish the list of unwind opcodes for this function. */
17104 static void
17105 finish_unwind_opcodes (void)
17106 {
17107 valueT op;
17108
17109 if (unwind.fp_used)
17110 {
17111 /* Adjust sp as necessary. */
17112 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17113 flush_pending_unwind ();
17114
17115       /* Emit the opcode that restores sp from the frame pointer register.  */
17116 op = 0x90 | unwind.fp_reg;
17117 add_unwind_opcode (op, 1);
17118 }
17119 else
17120 flush_pending_unwind ();
17121 }
17122
17123
17124 /* Start an exception table entry. If idx is nonzero this is an index table
17125 entry. */
17126
17127 static void
17128 start_unwind_section (const segT text_seg, int idx)
17129 {
17130 const char * text_name;
17131 const char * prefix;
17132 const char * prefix_once;
17133 const char * group_name;
17134 size_t prefix_len;
17135 size_t text_len;
17136 char * sec_name;
17137 size_t sec_name_len;
17138 int type;
17139 int flags;
17140 int linkonce;
17141
17142 if (idx)
17143 {
17144 prefix = ELF_STRING_ARM_unwind;
17145 prefix_once = ELF_STRING_ARM_unwind_once;
17146 type = SHT_ARM_EXIDX;
17147 }
17148 else
17149 {
17150 prefix = ELF_STRING_ARM_unwind_info;
17151 prefix_once = ELF_STRING_ARM_unwind_info_once;
17152 type = SHT_PROGBITS;
17153 }
17154
17155 text_name = segment_name (text_seg);
17156 if (streq (text_name, ".text"))
17157 text_name = "";
17158
17159 if (strncmp (text_name, ".gnu.linkonce.t.",
17160 strlen (".gnu.linkonce.t.")) == 0)
17161 {
17162 prefix = prefix_once;
17163 text_name += strlen (".gnu.linkonce.t.");
17164 }
17165
17166 prefix_len = strlen (prefix);
17167 text_len = strlen (text_name);
17168 sec_name_len = prefix_len + text_len;
17169 sec_name = xmalloc (sec_name_len + 1);
17170 memcpy (sec_name, prefix, prefix_len);
17171 memcpy (sec_name + prefix_len, text_name, text_len);
17172 sec_name[prefix_len + text_len] = '\0';
17173
17174 flags = SHF_ALLOC;
17175 linkonce = 0;
17176 group_name = 0;
17177
17178 /* Handle COMDAT group. */
17179 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17180 {
17181 group_name = elf_group_name (text_seg);
17182 if (group_name == NULL)
17183 {
17184 as_bad ("Group section `%s' has no group signature",
17185 segment_name (text_seg));
17186 ignore_rest_of_line ();
17187 return;
17188 }
17189 flags |= SHF_GROUP;
17190 linkonce = 1;
17191 }
17192
17193 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17194
17195   /* Set the section link for index tables.  */
17196 if (idx)
17197 elf_linked_to_section (now_seg) = text_seg;
17198 }
17199
17200
17201 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17202 personality routine data. Returns zero, or the index table value for
17203    an inline entry.  */
17204
17205 static valueT
17206 create_unwind_entry (int have_data)
17207 {
17208 int size;
17209 addressT where;
17210 char *ptr;
17211 /* The current word of data. */
17212 valueT data;
17213 /* The number of bytes left in this word. */
17214 int n;
17215
17216 finish_unwind_opcodes ();
17217
17218 /* Remember the current text section. */
17219 unwind.saved_seg = now_seg;
17220 unwind.saved_subseg = now_subseg;
17221
17222 start_unwind_section (now_seg, 0);
17223
17224 if (unwind.personality_routine == NULL)
17225 {
17226 if (unwind.personality_index == -2)
17227 {
17228 if (have_data)
17229 	  as_bad (_("handlerdata in cantunwind frame"));
17230 return 1; /* EXIDX_CANTUNWIND. */
17231 }
17232
17233 /* Use a default personality routine if none is specified. */
17234 if (unwind.personality_index == -1)
17235 {
17236 if (unwind.opcode_count > 3)
17237 unwind.personality_index = 1;
17238 else
17239 unwind.personality_index = 0;
17240 }
17241
17242 /* Space for the personality routine entry. */
17243 if (unwind.personality_index == 0)
17244 {
17245 if (unwind.opcode_count > 3)
17246 as_bad (_("too many unwind opcodes for personality routine 0"));
17247
17248 if (!have_data)
17249 {
17250 /* All the data is inline in the index table. */
17251 data = 0x80;
17252 n = 3;
17253 while (unwind.opcode_count > 0)
17254 {
17255 unwind.opcode_count--;
17256 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17257 n--;
17258 }
17259
17260 /* Pad with "finish" opcodes. */
17261 while (n--)
17262 data = (data << 8) | 0xb0;
17263
17264 return data;
17265 }
17266 size = 0;
17267 }
17268 else
17269 /* We get two opcodes "free" in the first word. */
17270 size = unwind.opcode_count - 2;
17271 }
17272 else
17273 /* An extra byte is required for the opcode count. */
17274 size = unwind.opcode_count + 1;
17275
17276 size = (size + 3) >> 2;
17277 if (size > 0xff)
17278 as_bad (_("too many unwind opcodes"));
17279
17280 frag_align (2, 0, 0);
17281 record_alignment (now_seg, 2);
17282 unwind.table_entry = expr_build_dot ();
17283
17284 /* Allocate the table entry. */
17285 ptr = frag_more ((size << 2) + 4);
17286 where = frag_now_fix () - ((size << 2) + 4);
17287
17288 switch (unwind.personality_index)
17289 {
17290 case -1:
17291 /* ??? Should this be a PLT generating relocation? */
17292 /* Custom personality routine. */
17293 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17294 BFD_RELOC_ARM_PREL31);
17295
17296 where += 4;
17297 ptr += 4;
17298
17299 /* Set the first byte to the number of additional words. */
17300 data = size - 1;
17301 n = 3;
17302 break;
17303
17304 /* ABI defined personality routines. */
17305 case 0:
17306       /* Three opcode bytes are packed into the first word.  */
17307 data = 0x80;
17308 n = 3;
17309 break;
17310
17311 case 1:
17312 case 2:
17313 /* The size and first two opcode bytes go in the first word. */
17314 data = ((0x80 + unwind.personality_index) << 8) | size;
17315 n = 2;
17316 break;
17317
17318 default:
17319 /* Should never happen. */
17320 abort ();
17321 }
17322
17323 /* Pack the opcodes into words (MSB first), reversing the list at the same
17324 time. */
17325 while (unwind.opcode_count > 0)
17326 {
17327 if (n == 0)
17328 {
17329 md_number_to_chars (ptr, data, 4);
17330 ptr += 4;
17331 n = 4;
17332 data = 0;
17333 }
17334 unwind.opcode_count--;
17335 n--;
17336 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17337 }
17338
17339 /* Finish off the last word. */
17340 if (n < 4)
17341 {
17342 /* Pad with "finish" opcodes. */
17343 while (n--)
17344 data = (data << 8) | 0xb0;
17345
17346 md_number_to_chars (ptr, data, 4);
17347 }
17348
17349 if (!have_data)
17350 {
17351 /* Add an empty descriptor if there is no user-specified data. */
17352 ptr = frag_more (4);
17353 md_number_to_chars (ptr, 0, 4);
17354 }
17355
17356 return 0;
17357 }
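
/* For illustration: a function whose only unwind action is an 8-byte stack
   adjustment (e.g. from ".pad #8") ends up with the single opcode 0x01, so
   the value returned above for the inline index table entry is 0x8001b0b0:
   a compact entry using personality routine 0, one real opcode, padded with
   0xb0 "finish" opcodes.  */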
17358
17359
17360 /* Initialize the DWARF-2 unwind information for this procedure. */
17361
17362 void
17363 tc_arm_frame_initial_instructions (void)
17364 {
17365 cfi_add_CFA_def_cfa (REG_SP, 0);
17366 }
17367 #endif /* OBJ_ELF */
17368
17369 /* Convert REGNAME to a DWARF-2 register number. */
17370
17371 int
17372 tc_arm_regname_to_dw2regnum (char *regname)
17373 {
17374 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17375
17376 if (reg == FAIL)
17377 return -1;
17378
17379 return reg;
17380 }
17381
17382 #ifdef TE_PE
17383 void
17384 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17385 {
17386 expressionS expr;
17387
17388 expr.X_op = O_secrel;
17389 expr.X_add_symbol = symbol;
17390 expr.X_add_number = 0;
17391 emit_expr (&expr, size);
17392 }
17393 #endif
17394
17395 /* MD interface: Symbol and relocation handling. */
17396
17397 /* Return the address within the segment that a PC-relative fixup is
17398 relative to. For ARM, PC-relative fixups applied to instructions
17399 are generally relative to the location of the fixup plus 8 bytes.
17400 Thumb branches are offset by 4, and Thumb loads relative to PC
17401 require special handling. */
17402
17403 long
17404 md_pcrel_from_section (fixS * fixP, segT seg)
17405 {
17406 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17407
17408 /* If this is pc-relative and we are going to emit a relocation
17409 then we just want to put out any pipeline compensation that the linker
17410 will need. Otherwise we want to use the calculated base.
17411 For WinCE we skip the bias for externals as well, since this
17412 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17413 if (fixP->fx_pcrel
17414 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17415 || (arm_force_relocation (fixP)
17416 #ifdef TE_WINCE
17417 && !S_IS_EXTERNAL (fixP->fx_addsy)
17418 #endif
17419 )))
17420 base = 0;
17421
17422 switch (fixP->fx_r_type)
17423 {
17424 /* PC relative addressing on the Thumb is slightly odd as the
17425 bottom two bits of the PC are forced to zero for the
17426 calculation. This happens *after* application of the
17427 pipeline offset. However, Thumb adrl already adjusts for
17428 this, so we need not do it again. */
17429 case BFD_RELOC_ARM_THUMB_ADD:
17430 return base & ~3;
17431
17432 case BFD_RELOC_ARM_THUMB_OFFSET:
17433 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17434 case BFD_RELOC_ARM_T32_ADD_PC12:
17435 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17436 return (base + 4) & ~3;
17437
17438 /* Thumb branches are simply offset by +4. */
17439 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17440 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17441 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17442 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17443 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17444 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17445 case BFD_RELOC_THUMB_PCREL_BLX:
17446 return base + 4;
17447
17448 /* ARM mode branches are offset by +8. However, the Windows CE
17449 loader expects the relocation not to take this into account. */
17450 case BFD_RELOC_ARM_PCREL_BRANCH:
17451 case BFD_RELOC_ARM_PCREL_CALL:
17452 case BFD_RELOC_ARM_PCREL_JUMP:
17453 case BFD_RELOC_ARM_PCREL_BLX:
17454 case BFD_RELOC_ARM_PLT32:
17455 #ifdef TE_WINCE
17456       /* When handling fixups immediately, because we have already
17457          discovered the value of a symbol, or the address of the frag involved,
17458 	 we must account for the +8 offset ourselves, as the OS loader will never
17459          see the reloc; see fixup_segment() in write.c.
17460          The S_IS_EXTERNAL test handles the case of global symbols: those need
17461          the calculated base, not just the pipeline compensation the linker will apply.  */
17462 if (fixP->fx_pcrel
17463 && fixP->fx_addsy != NULL
17464 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17465 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17466 return base + 8;
17467 return base;
17468 #else
17469 return base + 8;
17470 #endif
17471
17472 /* ARM mode loads relative to PC are also offset by +8. Unlike
17473 branches, the Windows CE loader *does* expect the relocation
17474 to take this into account. */
17475 case BFD_RELOC_ARM_OFFSET_IMM:
17476 case BFD_RELOC_ARM_OFFSET_IMM8:
17477 case BFD_RELOC_ARM_HWLITERAL:
17478 case BFD_RELOC_ARM_LITERAL:
17479 case BFD_RELOC_ARM_CP_OFF_IMM:
17480 return base + 8;
17481
17482
17483 /* Other PC-relative relocations are un-offset. */
17484 default:
17485 return base;
17486 }
17487 }
17488
17489 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
17490    Otherwise we have no need to give symbols default values.  */
17491
17492 symbolS *
17493 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17494 {
17495 #ifdef OBJ_ELF
17496 if (name[0] == '_' && name[1] == 'G'
17497 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17498 {
17499 if (!GOT_symbol)
17500 {
17501 if (symbol_find (name))
17502 as_bad ("GOT already in the symbol table");
17503
17504 GOT_symbol = symbol_new (name, undefined_section,
17505 (valueT) 0, & zero_address_frag);
17506 }
17507
17508 return GOT_symbol;
17509 }
17510 #endif
17511
17512 return 0;
17513 }
17514
17515 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17516 computed as two separate immediate values, added together. We
17517 already know that this value cannot be computed by just one ARM
17518 instruction. */
17519
17520 static unsigned int
17521 validate_immediate_twopart (unsigned int val,
17522 unsigned int * highpart)
17523 {
17524 unsigned int a;
17525 unsigned int i;
17526
17527 for (i = 0; i < 32; i += 2)
17528 if (((a = rotate_left (val, i)) & 0xff) != 0)
17529 {
17530 if (a & 0xff00)
17531 {
17532 if (a & ~ 0xffff)
17533 continue;
17534 * highpart = (a >> 8) | ((i + 24) << 7);
17535 }
17536 else if (a & 0xff0000)
17537 {
17538 if (a & 0xff000000)
17539 continue;
17540 * highpart = (a >> 16) | ((i + 16) << 7);
17541 }
17542 else
17543 {
17544 assert (a & 0xff000000);
17545 * highpart = (a >> 24) | ((i + 8) << 7);
17546 }
17547
17548 return (a & 0xff) | (i << 7);
17549 }
17550
17551 return FAIL;
17552 }
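
/* For illustration: 0x1001 cannot be encoded as a single rotated 8-bit ARM
   immediate, but it splits into 0x001 (the returned low part) plus 0x1000
   (returned through *HIGHPART as the value 0x10 with a rotation of 24), so
   an ADRL-style sequence can build it with two instructions.  */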
17553
17554 static int
17555 validate_offset_imm (unsigned int val, int hwse)
17556 {
17557 if ((hwse && val > 255) || val > 4095)
17558 return FAIL;
17559 return val;
17560 }
17561
17562 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17563 negative immediate constant by altering the instruction. A bit of
17564 a hack really.
17565 MOV <-> MVN
17566 AND <-> BIC
17567 ADC <-> SBC
17568 by inverting the second operand, and
17569 ADD <-> SUB
17570 CMP <-> CMN
17571 by negating the second operand. */
17572
17573 static int
17574 negate_data_op (unsigned long * instruction,
17575 unsigned long value)
17576 {
17577 int op, new_inst;
17578 unsigned long negated, inverted;
17579
17580 negated = encode_arm_immediate (-value);
17581 inverted = encode_arm_immediate (~value);
17582
17583 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17584 switch (op)
17585 {
17586 /* First negates. */
17587 case OPCODE_SUB: /* ADD <-> SUB */
17588 new_inst = OPCODE_ADD;
17589 value = negated;
17590 break;
17591
17592 case OPCODE_ADD:
17593 new_inst = OPCODE_SUB;
17594 value = negated;
17595 break;
17596
17597 case OPCODE_CMP: /* CMP <-> CMN */
17598 new_inst = OPCODE_CMN;
17599 value = negated;
17600 break;
17601
17602 case OPCODE_CMN:
17603 new_inst = OPCODE_CMP;
17604 value = negated;
17605 break;
17606
17607 /* Now Inverted ops. */
17608 case OPCODE_MOV: /* MOV <-> MVN */
17609 new_inst = OPCODE_MVN;
17610 value = inverted;
17611 break;
17612
17613 case OPCODE_MVN:
17614 new_inst = OPCODE_MOV;
17615 value = inverted;
17616 break;
17617
17618 case OPCODE_AND: /* AND <-> BIC */
17619 new_inst = OPCODE_BIC;
17620 value = inverted;
17621 break;
17622
17623 case OPCODE_BIC:
17624 new_inst = OPCODE_AND;
17625 value = inverted;
17626 break;
17627
17628 case OPCODE_ADC: /* ADC <-> SBC */
17629 new_inst = OPCODE_SBC;
17630 value = inverted;
17631 break;
17632
17633 case OPCODE_SBC:
17634 new_inst = OPCODE_ADC;
17635 value = inverted;
17636 break;
17637
17638 /* We cannot do anything. */
17639 default:
17640 return FAIL;
17641 }
17642
17643 if (value == (unsigned) FAIL)
17644 return FAIL;
17645
17646 *instruction &= OPCODE_MASK;
17647 *instruction |= new_inst << DATA_OP_SHIFT;
17648 return value;
17649 }
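
/* For illustration: if the immediate for "add r0, r1, #imm" resolves to -4,
   which has no rotated 8-bit encoding, negate_data_op flips the opcode to
   SUB and returns the encoding of +4, so the instruction is assembled as
   "sub r0, r1, #4".  */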
17650
17651 /* Like negate_data_op, but for Thumb-2. */
17652
17653 static unsigned int
17654 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17655 {
17656 int op, new_inst;
17657 int rd;
17658 unsigned int negated, inverted;
17659
17660 negated = encode_thumb32_immediate (-value);
17661 inverted = encode_thumb32_immediate (~value);
17662
17663 rd = (*instruction >> 8) & 0xf;
17664 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17665 switch (op)
17666 {
17667 /* ADD <-> SUB. Includes CMP <-> CMN. */
17668 case T2_OPCODE_SUB:
17669 new_inst = T2_OPCODE_ADD;
17670 value = negated;
17671 break;
17672
17673 case T2_OPCODE_ADD:
17674 new_inst = T2_OPCODE_SUB;
17675 value = negated;
17676 break;
17677
17678 /* ORR <-> ORN. Includes MOV <-> MVN. */
17679 case T2_OPCODE_ORR:
17680 new_inst = T2_OPCODE_ORN;
17681 value = inverted;
17682 break;
17683
17684 case T2_OPCODE_ORN:
17685 new_inst = T2_OPCODE_ORR;
17686 value = inverted;
17687 break;
17688
17689 /* AND <-> BIC. TST has no inverted equivalent. */
17690 case T2_OPCODE_AND:
17691 new_inst = T2_OPCODE_BIC;
17692 if (rd == 15)
17693 value = FAIL;
17694 else
17695 value = inverted;
17696 break;
17697
17698 case T2_OPCODE_BIC:
17699 new_inst = T2_OPCODE_AND;
17700 value = inverted;
17701 break;
17702
17703 /* ADC <-> SBC */
17704 case T2_OPCODE_ADC:
17705 new_inst = T2_OPCODE_SBC;
17706 value = inverted;
17707 break;
17708
17709 case T2_OPCODE_SBC:
17710 new_inst = T2_OPCODE_ADC;
17711 value = inverted;
17712 break;
17713
17714 /* We cannot do anything. */
17715 default:
17716 return FAIL;
17717 }
17718
17719 if (value == (unsigned int)FAIL)
17720 return FAIL;
17721
17722 *instruction &= T2_OPCODE_MASK;
17723 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17724 return value;
17725 }
17726
17727 /* Read a 32-bit thumb instruction from buf. */
17728 static unsigned long
17729 get_thumb32_insn (char * buf)
17730 {
17731 unsigned long insn;
17732 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17733 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17734
17735 return insn;
17736 }
17737
17738
17739 /* We usually want to set the low bit on the address of thumb function
17740 symbols. In particular .word foo - . should have the low bit set.
17741 Generic code tries to fold the difference of two symbols to
17742    a constant.  Prevent this and force a relocation when the first symbol
17743    is a Thumb function.  */
17744 int
17745 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17746 {
17747 if (op == O_subtract
17748 && l->X_op == O_symbol
17749 && r->X_op == O_symbol
17750 && THUMB_IS_FUNC (l->X_add_symbol))
17751 {
17752 l->X_op = O_subtract;
17753 l->X_op_symbol = r->X_add_symbol;
17754 l->X_add_number -= r->X_add_number;
17755 return 1;
17756 }
17757 /* Process as normal. */
17758 return 0;
17759 }
17760
17761 void
17762 md_apply_fix (fixS * fixP,
17763 valueT * valP,
17764 segT seg)
17765 {
17766 offsetT value = * valP;
17767 offsetT newval;
17768 unsigned int newimm;
17769 unsigned long temp;
17770 int sign;
17771 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17772
17773 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17774
17775 /* Note whether this will delete the relocation. */
17776
17777 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17778 fixP->fx_done = 1;
17779
17780 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17781 consistency with the behavior on 32-bit hosts. Remember value
17782 for emit_reloc. */
17783 value &= 0xffffffff;
17784 value ^= 0x80000000;
17785 value -= 0x80000000;
17786
17787 *valP = value;
17788 fixP->fx_addnumber = value;
17789
17790 /* Same treatment for fixP->fx_offset. */
17791 fixP->fx_offset &= 0xffffffff;
17792 fixP->fx_offset ^= 0x80000000;
17793 fixP->fx_offset -= 0x80000000;
17794
17795 switch (fixP->fx_r_type)
17796 {
17797 case BFD_RELOC_NONE:
17798 /* This will need to go in the object file. */
17799 fixP->fx_done = 0;
17800 break;
17801
17802 case BFD_RELOC_ARM_IMMEDIATE:
17803 /* We claim that this fixup has been processed here,
17804 even if in fact we generate an error because we do
17805 not have a reloc for it, so tc_gen_reloc will reject it. */
17806 fixP->fx_done = 1;
17807
17808 if (fixP->fx_addsy
17809 && ! S_IS_DEFINED (fixP->fx_addsy))
17810 {
17811 as_bad_where (fixP->fx_file, fixP->fx_line,
17812 _("undefined symbol %s used as an immediate value"),
17813 S_GET_NAME (fixP->fx_addsy));
17814 break;
17815 }
17816
17817 newimm = encode_arm_immediate (value);
17818 temp = md_chars_to_number (buf, INSN_SIZE);
17819
17820 /* If the instruction will fail, see if we can fix things up by
17821 changing the opcode. */
17822 if (newimm == (unsigned int) FAIL
17823 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17824 {
17825 as_bad_where (fixP->fx_file, fixP->fx_line,
17826 _("invalid constant (%lx) after fixup"),
17827 (unsigned long) value);
17828 break;
17829 }
17830
17831 newimm |= (temp & 0xfffff000);
17832 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17833 break;
17834
17835 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17836 {
17837 unsigned int highpart = 0;
17838 unsigned int newinsn = 0xe1a00000; /* nop. */
17839
17840 newimm = encode_arm_immediate (value);
17841 temp = md_chars_to_number (buf, INSN_SIZE);
17842
17843 /* If the instruction will fail, see if we can fix things up by
17844 changing the opcode. */
17845 if (newimm == (unsigned int) FAIL
17846 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17847 {
17848 /* No ? OK - try using two ADD instructions to generate
17849 the value. */
17850 newimm = validate_immediate_twopart (value, & highpart);
17851
17852 /* Yes - then make sure that the second instruction is
17853 also an add. */
17854 if (newimm != (unsigned int) FAIL)
17855 newinsn = temp;
17856 /* Still No ? Try using a negated value. */
17857 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17858 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17859 /* Otherwise - give up. */
17860 else
17861 {
17862 as_bad_where (fixP->fx_file, fixP->fx_line,
17863 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17864 (long) value);
17865 break;
17866 }
17867
17868 /* Replace the first operand in the 2nd instruction (which
17869 is the PC) with the destination register. We have
17870 already added in the PC in the first instruction and we
17871 do not want to do it again. */
17872 newinsn &= ~ 0xf0000;
17873 newinsn |= ((newinsn & 0x0f000) << 4);
17874 }
17875
17876 newimm |= (temp & 0xfffff000);
17877 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17878
17879 highpart |= (newinsn & 0xfffff000);
17880 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17881 }
17882 break;
17883
17884 case BFD_RELOC_ARM_OFFSET_IMM:
17885 if (!fixP->fx_done && seg->use_rela_p)
17886 value = 0;
17887
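      /* Fall through.  */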
17888 case BFD_RELOC_ARM_LITERAL:
17889 sign = value >= 0;
17890
17891 if (value < 0)
17892 value = - value;
17893
17894 if (validate_offset_imm (value, 0) == FAIL)
17895 {
17896 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17897 as_bad_where (fixP->fx_file, fixP->fx_line,
17898 _("invalid literal constant: pool needs to be closer"));
17899 else
17900 as_bad_where (fixP->fx_file, fixP->fx_line,
17901 _("bad immediate value for offset (%ld)"),
17902 (long) value);
17903 break;
17904 }
17905
17906 newval = md_chars_to_number (buf, INSN_SIZE);
17907 newval &= 0xff7ff000;
17908 newval |= value | (sign ? INDEX_UP : 0);
17909 md_number_to_chars (buf, newval, INSN_SIZE);
17910 break;
17911
17912 case BFD_RELOC_ARM_OFFSET_IMM8:
17913 case BFD_RELOC_ARM_HWLITERAL:
17914 sign = value >= 0;
17915
17916 if (value < 0)
17917 value = - value;
17918
17919 if (validate_offset_imm (value, 1) == FAIL)
17920 {
17921 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17922 as_bad_where (fixP->fx_file, fixP->fx_line,
17923 _("invalid literal constant: pool needs to be closer"));
17924 else
17925 as_bad (_("bad immediate value for half-word offset (%ld)"),
17926 (long) value);
17927 break;
17928 }
17929
17930 newval = md_chars_to_number (buf, INSN_SIZE);
17931 newval &= 0xff7ff0f0;
17932 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17933 md_number_to_chars (buf, newval, INSN_SIZE);
17934 break;
17935
17936 case BFD_RELOC_ARM_T32_OFFSET_U8:
17937 if (value < 0 || value > 1020 || value % 4 != 0)
17938 as_bad_where (fixP->fx_file, fixP->fx_line,
17939 _("bad immediate value for offset (%ld)"), (long) value);
17940 value /= 4;
17941
17942 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17943 newval |= value;
17944 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17945 break;
17946
17947 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17948 /* This is a complicated relocation used for all varieties of Thumb32
17949 load/store instruction with immediate offset:
17950
17951 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17952 *4, optional writeback(W)
17953 (doubleword load/store)
17954
17955 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17956 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17957 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17958 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17959 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17960
17961 Uppercase letters indicate bits that are already encoded at
17962 this point. Lowercase letters are our problem. For the
17963 second block of instructions, the secondary opcode nybble
17964 (bits 8..11) is present, and bit 23 is zero, even if this is
17965 a PC-relative operation. */
17966 newval = md_chars_to_number (buf, THUMB_SIZE);
17967 newval <<= 16;
17968 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17969
17970 if ((newval & 0xf0000000) == 0xe0000000)
17971 {
17972 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17973 if (value >= 0)
17974 newval |= (1 << 23);
17975 else
17976 value = -value;
17977 if (value % 4 != 0)
17978 {
17979 as_bad_where (fixP->fx_file, fixP->fx_line,
17980 _("offset not a multiple of 4"));
17981 break;
17982 }
17983 value /= 4;
17984 if (value > 0xff)
17985 {
17986 as_bad_where (fixP->fx_file, fixP->fx_line,
17987 _("offset out of range"));
17988 break;
17989 }
17990 newval &= ~0xff;
17991 }
17992 else if ((newval & 0x000f0000) == 0x000f0000)
17993 {
17994 /* PC-relative, 12-bit offset. */
17995 if (value >= 0)
17996 newval |= (1 << 23);
17997 else
17998 value = -value;
17999 if (value > 0xfff)
18000 {
18001 as_bad_where (fixP->fx_file, fixP->fx_line,
18002 _("offset out of range"));
18003 break;
18004 }
18005 newval &= ~0xfff;
18006 }
18007 else if ((newval & 0x00000100) == 0x00000100)
18008 {
18009 /* Writeback: 8-bit, +/- offset. */
18010 if (value >= 0)
18011 newval |= (1 << 9);
18012 else
18013 value = -value;
18014 if (value > 0xff)
18015 {
18016 as_bad_where (fixP->fx_file, fixP->fx_line,
18017 _("offset out of range"));
18018 break;
18019 }
18020 newval &= ~0xff;
18021 }
18022 else if ((newval & 0x00000f00) == 0x00000e00)
18023 {
18024 /* T-instruction: positive 8-bit offset. */
18025 if (value < 0 || value > 0xff)
18026 {
18027 as_bad_where (fixP->fx_file, fixP->fx_line,
18028 _("offset out of range"));
18029 break;
18030 }
18031 newval &= ~0xff;
18032 newval |= value;
18033 }
18034 else
18035 {
18036 /* Positive 12-bit or negative 8-bit offset. */
18037 int limit;
18038 if (value >= 0)
18039 {
18040 newval |= (1 << 23);
18041 limit = 0xfff;
18042 }
18043 else
18044 {
18045 value = -value;
18046 limit = 0xff;
18047 }
18048 if (value > limit)
18049 {
18050 as_bad_where (fixP->fx_file, fixP->fx_line,
18051 _("offset out of range"));
18052 break;
18053 }
18054 newval &= ~limit;
18055 }
18056
18057 newval |= value;
18058 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18059 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18060 break;
18061
18062 case BFD_RELOC_ARM_SHIFT_IMM:
18063 newval = md_chars_to_number (buf, INSN_SIZE);
18064 if (((unsigned long) value) > 32
18065 || (value == 32
18066 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18067 {
18068 as_bad_where (fixP->fx_file, fixP->fx_line,
18069 _("shift expression is too large"));
18070 break;
18071 }
18072
18073 if (value == 0)
18074 /* Shifts of zero must be done as lsl. */
18075 newval &= ~0x60;
18076 else if (value == 32)
18077 value = 0;
18078 newval &= 0xfffff07f;
18079 newval |= (value & 0x1f) << 7;
18080 md_number_to_chars (buf, newval, INSN_SIZE);
18081 break;
18082
18083 case BFD_RELOC_ARM_T32_IMMEDIATE:
18084 case BFD_RELOC_ARM_T32_ADD_IMM:
18085 case BFD_RELOC_ARM_T32_IMM12:
18086 case BFD_RELOC_ARM_T32_ADD_PC12:
18087 /* We claim that this fixup has been processed here even if we end
18088 up reporting an error, because we do not have a reloc for it, so
18089 tc_gen_reloc would reject it. */
18090 fixP->fx_done = 1;
18091
18092 if (fixP->fx_addsy
18093 && ! S_IS_DEFINED (fixP->fx_addsy))
18094 {
18095 as_bad_where (fixP->fx_file, fixP->fx_line,
18096 _("undefined symbol %s used as an immediate value"),
18097 S_GET_NAME (fixP->fx_addsy));
18098 break;
18099 }
18100
18101 newval = md_chars_to_number (buf, THUMB_SIZE);
18102 newval <<= 16;
18103 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18104
18105 newimm = FAIL;
18106 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18107 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18108 {
18109 newimm = encode_thumb32_immediate (value);
18110 if (newimm == (unsigned int) FAIL)
18111 newimm = thumb32_negate_data_op (&newval, value);
18112 }
18113 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18114 && newimm == (unsigned int) FAIL)
18115 {
18116 /* Turn add/sub into addw/subw. */
18117 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18118 newval = (newval & 0xfeffffff) | 0x02000000;
18119
18120 /* 12 bit immediate for addw/subw. */
18121 if (value < 0)
18122 {
18123 value = -value;
18124 newval ^= 0x00a00000;
18125 }
18126 if (value > 0xfff)
18127 newimm = (unsigned int) FAIL;
18128 else
18129 newimm = value;
18130 }
18131
18132 if (newimm == (unsigned int)FAIL)
18133 {
18134 as_bad_where (fixP->fx_file, fixP->fx_line,
18135 _("invalid constant (%lx) after fixup"),
18136 (unsigned long) value);
18137 break;
18138 }
18139
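      /* newimm is now a 12-bit value: either a Thumb-2 modified immediate
         (i:imm3:imm8) or a plain addw/subw immediate.  Either way it is
         scattered identically below: bit 11 goes to bit 26 of the first
         halfword, bits 8-10 to bits 12-14 of the second halfword, and
         bits 0-7 to its low byte.  For example (value chosen purely for
         illustration) 0x4ab splits into 0, 0b100 and 0xab.  */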
18140 newval |= (newimm & 0x800) << 15;
18141 newval |= (newimm & 0x700) << 4;
18142 newval |= (newimm & 0x0ff);
18143
18144 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18145 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18146 break;
18147
18148 case BFD_RELOC_ARM_SMC:
18149 if (((unsigned long) value) > 0xffff)
18150 as_bad_where (fixP->fx_file, fixP->fx_line,
18151 _("invalid smc expression"));
18152 newval = md_chars_to_number (buf, INSN_SIZE);
18153 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18154 md_number_to_chars (buf, newval, INSN_SIZE);
18155 break;
18156
18157 case BFD_RELOC_ARM_SWI:
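      /* tc_fix_data is nonzero here when the fixup was created for a Thumb
         SWI/SVC, which only has an 8-bit comment field; the ARM encoding
         handled in the else branch carries 24 bits.  (Inferred from the two
         branches below rather than stated anywhere else.)  */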
18158 if (fixP->tc_fix_data != 0)
18159 {
18160 if (((unsigned long) value) > 0xff)
18161 as_bad_where (fixP->fx_file, fixP->fx_line,
18162 _("invalid swi expression"));
18163 newval = md_chars_to_number (buf, THUMB_SIZE);
18164 newval |= value;
18165 md_number_to_chars (buf, newval, THUMB_SIZE);
18166 }
18167 else
18168 {
18169 if (((unsigned long) value) > 0x00ffffff)
18170 as_bad_where (fixP->fx_file, fixP->fx_line,
18171 _("invalid swi expression"));
18172 newval = md_chars_to_number (buf, INSN_SIZE);
18173 newval |= value;
18174 md_number_to_chars (buf, newval, INSN_SIZE);
18175 }
18176 break;
18177
18178 case BFD_RELOC_ARM_MULTI:
18179 if (((unsigned long) value) > 0xffff)
18180 as_bad_where (fixP->fx_file, fixP->fx_line,
18181 _("invalid expression in load/store multiple"));
18182 newval = value | md_chars_to_number (buf, INSN_SIZE);
18183 md_number_to_chars (buf, newval, INSN_SIZE);
18184 break;
18185
18186 #ifdef OBJ_ELF
18187 case BFD_RELOC_ARM_PCREL_CALL:
18188 newval = md_chars_to_number (buf, INSN_SIZE);
18189 if ((newval & 0xf0000000) == 0xf0000000)
18190 temp = 1;
18191 else
18192 temp = 3;
18193 goto arm_branch_common;
18194
18195 case BFD_RELOC_ARM_PCREL_JUMP:
18196 case BFD_RELOC_ARM_PLT32:
18197 #endif
18198 case BFD_RELOC_ARM_PCREL_BRANCH:
18199 temp = 3;
18200 goto arm_branch_common;
18201
18202 case BFD_RELOC_ARM_PCREL_BLX:
18203 temp = 1;
18204 arm_branch_common:
18205 /* We are going to store value (shifted right by two) in the
18206 instruction, in a 24 bit, signed field.  Bits 25 through 31 must be
18207 either all clear or all set, and bit 0 must be clear.  For B/BL bit 1
18208 must also be clear. */
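      /* For illustration (offset chosen arbitrarily): an offset of -8 stores
         (-8 >> 2) & 0x00ffffff = 0xfffffe in the 24-bit field.  For BLX,
         bit 1 of the offset cannot live in that field, so it is carried in
         the H bit (bit 24) instead, as set up below.  */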
18209 if (value & temp)
18210 as_bad_where (fixP->fx_file, fixP->fx_line,
18211 _("misaligned branch destination"));
18212 if ((value & (offsetT)0xfe000000) != (offsetT)0
18213 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18214 as_bad_where (fixP->fx_file, fixP->fx_line,
18215 _("branch out of range"));
18216
18217 if (fixP->fx_done || !seg->use_rela_p)
18218 {
18219 newval = md_chars_to_number (buf, INSN_SIZE);
18220 newval |= (value >> 2) & 0x00ffffff;
18221 /* Set the H bit on BLX instructions. */
18222 if (temp == 1)
18223 {
18224 if (value & 2)
18225 newval |= 0x01000000;
18226 else
18227 newval &= ~0x01000000;
18228 }
18229 md_number_to_chars (buf, newval, INSN_SIZE);
18230 }
18231 break;
18232
18233 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18234 /* CBZ can only branch forward. */
18235
18236 /* Attempts to use CBZ to branch to the next instruction
18237 (which, strictly speaking, is prohibited) will be turned into
18238 no-ops.
18239
18240 FIXME: It may be better to remove the instruction completely and
18241 perform relaxation. */
18242 if (value == -2)
18243 {
18244 newval = md_chars_to_number (buf, THUMB_SIZE);
18245 newval = 0xbf00; /* NOP encoding T1 */
18246 md_number_to_chars (buf, newval, THUMB_SIZE);
18247 }
18248 else
18249 {
18250 if (value & ~0x7e)
18251 as_bad_where (fixP->fx_file, fixP->fx_line,
18252 _("branch out of range"));
18253
18254 if (fixP->fx_done || !seg->use_rela_p)
18255 {
18256 newval = md_chars_to_number (buf, THUMB_SIZE);
18257 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18258 md_number_to_chars (buf, newval, THUMB_SIZE);
18259 }
18260 }
18261 break;
18262
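      /* The Thumb branch cases below all use the same range-check idiom:
         for an N-bit signed byte offset, (value & ~MASK) must be either
         all zeros (0 <= value <= MASK) or all ones (-MASK-1 <= value <= -1).
         For BRANCH9, MASK is 0xff, giving a byte range of -256 .. 255 before
         the final shift right by one.  */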
18263 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18264 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18265 as_bad_where (fixP->fx_file, fixP->fx_line,
18266 _("branch out of range"));
18267
18268 if (fixP->fx_done || !seg->use_rela_p)
18269 {
18270 newval = md_chars_to_number (buf, THUMB_SIZE);
18271 newval |= (value & 0x1ff) >> 1;
18272 md_number_to_chars (buf, newval, THUMB_SIZE);
18273 }
18274 break;
18275
18276 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18277 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18278 as_bad_where (fixP->fx_file, fixP->fx_line,
18279 _("branch out of range"));
18280
18281 if (fixP->fx_done || !seg->use_rela_p)
18282 {
18283 newval = md_chars_to_number (buf, THUMB_SIZE);
18284 newval |= (value & 0xfff) >> 1;
18285 md_number_to_chars (buf, newval, THUMB_SIZE);
18286 }
18287 break;
18288
18289 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18290 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18291 as_bad_where (fixP->fx_file, fixP->fx_line,
18292 _("conditional branch out of range"));
18293
18294 if (fixP->fx_done || !seg->use_rela_p)
18295 {
18296 offsetT newval2;
18297 addressT S, J1, J2, lo, hi;
18298
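          /* Thumb-2 conditional branch (encoding T3): the offset is split
             as S:J2:J1:imm6:imm11:'0'.  S and imm6 are packed into the
             first halfword (bit 10 and bits 0-5), J1, J2 and imm11 into
             the second (bits 13, 11 and 0-10).  */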
18299 S = (value & 0x00100000) >> 20;
18300 J2 = (value & 0x00080000) >> 19;
18301 J1 = (value & 0x00040000) >> 18;
18302 hi = (value & 0x0003f000) >> 12;
18303 lo = (value & 0x00000ffe) >> 1;
18304
18305 newval = md_chars_to_number (buf, THUMB_SIZE);
18306 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18307 newval |= (S << 10) | hi;
18308 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18309 md_number_to_chars (buf, newval, THUMB_SIZE);
18310 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18311 }
18312 break;
18313
18314 case BFD_RELOC_THUMB_PCREL_BLX:
18315 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18316 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18317 as_bad_where (fixP->fx_file, fixP->fx_line,
18318 _("branch out of range"));
18319
18320 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18321 /* For a BLX instruction, make sure that the relocation is rounded up
18322 to a word boundary. This follows the semantics of the instruction
18323 which specifies that bit 1 of the target address will come from bit
18324 1 of the base address. */
18325 value = (value + 1) & ~ 1;
18326
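      /* For example (target offset chosen for illustration): for BLX an
         offset of 0x1005 is rounded up to 0x1006 by the statement above;
         the split below then stores (0x1006 & 0x7fffff) >> 12 = 0x1 in the
         first halfword and (0x006 >> 1) = 0x3 in the second.  */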
18327 if (fixP->fx_done || !seg->use_rela_p)
18328 {
18329 offsetT newval2;
18330
18331 newval = md_chars_to_number (buf, THUMB_SIZE);
18332 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18333 newval |= (value & 0x7fffff) >> 12;
18334 newval2 |= (value & 0xfff) >> 1;
18335 md_number_to_chars (buf, newval, THUMB_SIZE);
18336 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18337 }
18338 break;
18339
18340 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18341 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18342 as_bad_where (fixP->fx_file, fixP->fx_line,
18343 _("branch out of range"));
18344
18345 if (fixP->fx_done || !seg->use_rela_p)
18346 {
18347 offsetT newval2;
18348 addressT S, I1, I2, lo, hi;
18349
18350 S = (value & 0x01000000) >> 24;
18351 I1 = (value & 0x00800000) >> 23;
18352 I2 = (value & 0x00400000) >> 22;
18353 hi = (value & 0x003ff000) >> 12;
18354 lo = (value & 0x00000ffe) >> 1;
18355
18356 I1 = !(I1 ^ S);
18357 I2 = !(I2 ^ S);
18358
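          /* In the 25-bit encoding (B.W/BL encoding T4) the two bits after S
             are stored inverted relative to S: J1 = NOT(I1 XOR S) and
             J2 = NOT(I2 XOR S), which is what the two statements above
             compute before everything is packed below.  */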
18359 newval = md_chars_to_number (buf, THUMB_SIZE);
18360 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18361 newval |= (S << 10) | hi;
18362 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18363 md_number_to_chars (buf, newval, THUMB_SIZE);
18364 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18365 }
18366 break;
18367
18368 case BFD_RELOC_8:
18369 if (fixP->fx_done || !seg->use_rela_p)
18370 md_number_to_chars (buf, value, 1);
18371 break;
18372
18373 case BFD_RELOC_16:
18374 if (fixP->fx_done || !seg->use_rela_p)
18375 md_number_to_chars (buf, value, 2);
18376 break;
18377
18378 #ifdef OBJ_ELF
18379 case BFD_RELOC_ARM_TLS_GD32:
18380 case BFD_RELOC_ARM_TLS_LE32:
18381 case BFD_RELOC_ARM_TLS_IE32:
18382 case BFD_RELOC_ARM_TLS_LDM32:
18383 case BFD_RELOC_ARM_TLS_LDO32:
18384 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18385 /* fall through */
18386
18387 case BFD_RELOC_ARM_GOT32:
18388 case BFD_RELOC_ARM_GOTOFF:
18389 case BFD_RELOC_ARM_TARGET2:
18390 if (fixP->fx_done || !seg->use_rela_p)
18391 md_number_to_chars (buf, 0, 4);
18392 break;
18393 #endif
18394
18395 case BFD_RELOC_RVA:
18396 case BFD_RELOC_32:
18397 case BFD_RELOC_ARM_TARGET1:
18398 case BFD_RELOC_ARM_ROSEGREL32:
18399 case BFD_RELOC_ARM_SBREL32:
18400 case BFD_RELOC_32_PCREL:
18401 #ifdef TE_PE
18402 case BFD_RELOC_32_SECREL:
18403 #endif
18404 if (fixP->fx_done || !seg->use_rela_p)
18405 #ifdef TE_WINCE
18406 /* For WinCE we only do this for pcrel fixups. */
18407 if (fixP->fx_done || fixP->fx_pcrel)
18408 #endif
18409 md_number_to_chars (buf, value, 4);
18410 break;
18411
18412 #ifdef OBJ_ELF
18413 case BFD_RELOC_ARM_PREL31:
18414 if (fixP->fx_done || !seg->use_rela_p)
18415 {
18416 newval = md_chars_to_number (buf, 4) & 0x80000000;
18417 if ((value ^ (value >> 1)) & 0x40000000)
18418 {
18419 as_bad_where (fixP->fx_file, fixP->fx_line,
18420 _("rel31 relocation overflow"));
18421 }
18422 newval |= value & 0x7fffffff;
18423 md_number_to_chars (buf, newval, 4);
18424 }
18425 break;
18426 #endif
18427
18428 case BFD_RELOC_ARM_CP_OFF_IMM:
18429 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18430 if (value < -1023 || value > 1023 || (value & 3))
18431 as_bad_where (fixP->fx_file, fixP->fx_line,
18432 _("co-processor offset out of range"));
18433 cp_off_common:
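      /* Coprocessor loads/stores keep an 8-bit word offset: the sign selects
         the U bit (INDEX_UP) and |value| / 4 goes into the low 8 bits.  For
         illustration, an offset of -20 gives U clear and imm8 = 5.  The *_S2
         variants handled below pre-multiply their offset by 4 before jumping
         back to this label.  */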
18434 sign = value >= 0;
18435 if (value < 0)
18436 value = -value;
18437 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18438 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18439 newval = md_chars_to_number (buf, INSN_SIZE);
18440 else
18441 newval = get_thumb32_insn (buf);
18442 newval &= 0xff7fff00;
18443 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18444 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18445 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18446 md_number_to_chars (buf, newval, INSN_SIZE);
18447 else
18448 put_thumb32_insn (buf, newval);
18449 break;
18450
18451 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18452 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18453 if (value < -255 || value > 255)
18454 as_bad_where (fixP->fx_file, fixP->fx_line,
18455 _("co-processor offset out of range"));
18456 value *= 4;
18457 goto cp_off_common;
18458
18459 case BFD_RELOC_ARM_THUMB_OFFSET:
18460 newval = md_chars_to_number (buf, THUMB_SIZE);
18461 /* Exactly what ranges, and where the offset is inserted, depends
18462 on the type of instruction; we can establish this from the
18463 top 4 bits. */
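      /* For instance (numbers for illustration only): a PC-relative load
         with an offset of 64 arrives here with the top nybble equal to 4,
         so the word-scaled 64 >> 2 = 16 lands in the low 8 bits; a byte
         load/store (top nybble 7) instead stores its offset unscaled in
         bits 6-10, hence the 0..31 limit checked below.  */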
18464 switch (newval >> 12)
18465 {
18466 case 4: /* PC load. */
18467 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18468 forced to zero for these loads; md_pcrel_from has already
18469 compensated for this. */
18470 if (value & 3)
18471 as_bad_where (fixP->fx_file, fixP->fx_line,
18472 _("invalid offset, target not word aligned (0x%08lX)"),
18473 (((unsigned long) fixP->fx_frag->fr_address
18474 + (unsigned long) fixP->fx_where) & ~3)
18475 + (unsigned long) value);
18476
18477 if (value & ~0x3fc)
18478 as_bad_where (fixP->fx_file, fixP->fx_line,
18479 _("invalid offset, value too big (0x%08lX)"),
18480 (long) value);
18481
18482 newval |= value >> 2;
18483 break;
18484
18485 case 9: /* SP load/store. */
18486 if (value & ~0x3fc)
18487 as_bad_where (fixP->fx_file, fixP->fx_line,
18488 _("invalid offset, value too big (0x%08lX)"),
18489 (long) value);
18490 newval |= value >> 2;
18491 break;
18492
18493 case 6: /* Word load/store. */
18494 if (value & ~0x7c)
18495 as_bad_where (fixP->fx_file, fixP->fx_line,
18496 _("invalid offset, value too big (0x%08lX)"),
18497 (long) value);
18498 newval |= value << 4; /* 6 - 2. */
18499 break;
18500
18501 case 7: /* Byte load/store. */
18502 if (value & ~0x1f)
18503 as_bad_where (fixP->fx_file, fixP->fx_line,
18504 _("invalid offset, value too big (0x%08lX)"),
18505 (long) value);
18506 newval |= value << 6;
18507 break;
18508
18509 case 8: /* Halfword load/store. */
18510 if (value & ~0x3e)
18511 as_bad_where (fixP->fx_file, fixP->fx_line,
18512 _("invalid offset, value too big (0x%08lX)"),
18513 (long) value);
18514 newval |= value << 5; /* 6 - 1. */
18515 break;
18516
18517 default:
18518 as_bad_where (fixP->fx_file, fixP->fx_line,
18519 "Unable to process relocation for thumb opcode: %lx",
18520 (unsigned long) newval);
18521 break;
18522 }
18523 md_number_to_chars (buf, newval, THUMB_SIZE);
18524 break;
18525
18526 case BFD_RELOC_ARM_THUMB_ADD:
18527 /* This is a complicated relocation, since we use it for all of
18528 the following immediate relocations:
18529
18530 3bit ADD/SUB
18531 8bit ADD/SUB
18532 9bit ADD/SUB SP word-aligned
18533 10bit ADD PC/SP word-aligned
18534
18535 The type of instruction being processed is encoded in the
18536 instruction field:
18537
18538 0x8000 SUB
18539 0x00F0 Rd
18540 0x000F Rs
18541 */
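      /* For example (registers and value chosen for illustration): an
         "add rd, pc, #imm" that resolves to 0x40 takes the rs == REG_PC
         branch below, emitting T_OPCODE_ADD_PC with rd in bits 8-10 and the
         word-scaled immediate 0x40 >> 2 = 0x10 in the low 8 bits.  */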
18542 newval = md_chars_to_number (buf, THUMB_SIZE);
18543 {
18544 int rd = (newval >> 4) & 0xf;
18545 int rs = newval & 0xf;
18546 int subtract = !!(newval & 0x8000);
18547
18548 /* Check for HI regs, only very restricted cases allowed:
18549 Adjusting SP, and using PC or SP to get an address. */
18550 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18551 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18552 as_bad_where (fixP->fx_file, fixP->fx_line,
18553 _("invalid Hi register with immediate"));
18554
18555 /* If value is negative, choose the opposite instruction. */
18556 if (value < 0)
18557 {
18558 value = -value;
18559 subtract = !subtract;
18560 if (value < 0)
18561 as_bad_where (fixP->fx_file, fixP->fx_line,
18562 _("immediate value out of range"));
18563 }
18564
18565 if (rd == REG_SP)
18566 {
18567 if (value & ~0x1fc)
18568 as_bad_where (fixP->fx_file, fixP->fx_line,
18569 _("invalid immediate for stack address calculation"));
18570 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18571 newval |= value >> 2;
18572 }
18573 else if (rs == REG_PC || rs == REG_SP)
18574 {
18575 if (subtract || value & ~0x3fc)
18576 as_bad_where (fixP->fx_file, fixP->fx_line,
18577 _("invalid immediate for address calculation (value = 0x%08lX)"),
18578 (unsigned long) value);
18579 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18580 newval |= rd << 8;
18581 newval |= value >> 2;
18582 }
18583 else if (rs == rd)
18584 {
18585 if (value & ~0xff)
18586 as_bad_where (fixP->fx_file, fixP->fx_line,
18587 _("immediate value out of range"));
18588 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18589 newval |= (rd << 8) | value;
18590 }
18591 else
18592 {
18593 if (value & ~0x7)
18594 as_bad_where (fixP->fx_file, fixP->fx_line,
18595 _("immediate value out of range"));
18596 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18597 newval |= rd | (rs << 3) | (value << 6);
18598 }
18599 }
18600 md_number_to_chars (buf, newval, THUMB_SIZE);
18601 break;
18602
18603 case BFD_RELOC_ARM_THUMB_IMM:
18604 newval = md_chars_to_number (buf, THUMB_SIZE);
18605 if (value < 0 || value > 255)
18606 as_bad_where (fixP->fx_file, fixP->fx_line,
18607 _("invalid immediate: %ld is too large"),
18608 (long) value);
18609 newval |= value;
18610 md_number_to_chars (buf, newval, THUMB_SIZE);
18611 break;
18612
18613 case BFD_RELOC_ARM_THUMB_SHIFT:
18614 /* 5bit shift value (0..32). LSL cannot take 32. */
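      /* For illustration: "asr r0, r1, #32" arrives with value 32; since the
         opcode is not LSL it is accepted, and the code below encodes it with
         an immediate field of zero.  */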
18615 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18616 temp = newval & 0xf800;
18617 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18618 as_bad_where (fixP->fx_file, fixP->fx_line,
18619 _("invalid shift value: %ld"), (long) value);
18620 /* Shifts of zero must be encoded as LSL. */
18621 if (value == 0)
18622 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18623 /* Shifts of 32 are encoded as zero. */
18624 else if (value == 32)
18625 value = 0;
18626 newval |= value << 6;
18627 md_number_to_chars (buf, newval, THUMB_SIZE);
18628 break;
18629
18630 case BFD_RELOC_VTABLE_INHERIT:
18631 case BFD_RELOC_VTABLE_ENTRY:
18632 fixP->fx_done = 0;
18633 return;
18634
18635 case BFD_RELOC_ARM_MOVW:
18636 case BFD_RELOC_ARM_MOVT:
18637 case BFD_RELOC_ARM_THUMB_MOVW:
18638 case BFD_RELOC_ARM_THUMB_MOVT:
18639 if (fixP->fx_done || !seg->use_rela_p)
18640 {
18641 /* REL format relocations are limited to a 16-bit addend. */
18642 if (!fixP->fx_done)
18643 {
18644 if (value < -0x1000 || value > 0xffff)
18645 as_bad_where (fixP->fx_file, fixP->fx_line,
18646 _("offset too big"));
18647 }
18648 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18649 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18650 {
18651 value >>= 16;
18652 }
18653
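          /* The 16-bit value is scattered as imm4:i:imm3:imm8.  The Thumb-2
             form below puts imm4 in bits 16-19, i in bit 26, imm3 in bits
             12-14 and imm8 in bits 0-7 of the combined 32-bit value; the ARM
             form keeps imm4 in bits 16-19 and the remaining 12 bits in bits
             0-11.  E.g. (value for illustration) 0x1234 gives imm4=1, i=0,
             imm3=2, imm8=0x34.  */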
18654 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18655 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18656 {
18657 newval = get_thumb32_insn (buf);
18658 newval &= 0xfbf08f00;
18659 newval |= (value & 0xf000) << 4;
18660 newval |= (value & 0x0800) << 15;
18661 newval |= (value & 0x0700) << 4;
18662 newval |= (value & 0x00ff);
18663 put_thumb32_insn (buf, newval);
18664 }
18665 else
18666 {
18667 newval = md_chars_to_number (buf, 4);
18668 newval &= 0xfff0f000;
18669 newval |= value & 0x0fff;
18670 newval |= (value & 0xf000) << 4;
18671 md_number_to_chars (buf, newval, 4);
18672 }
18673 }
18674 return;
18675
18676 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18677 case BFD_RELOC_ARM_ALU_PC_G0:
18678 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18679 case BFD_RELOC_ARM_ALU_PC_G1:
18680 case BFD_RELOC_ARM_ALU_PC_G2:
18681 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18682 case BFD_RELOC_ARM_ALU_SB_G0:
18683 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18684 case BFD_RELOC_ARM_ALU_SB_G1:
18685 case BFD_RELOC_ARM_ALU_SB_G2:
18686 assert (!fixP->fx_done);
18687 if (!seg->use_rela_p)
18688 {
18689 bfd_vma insn;
18690 bfd_vma encoded_addend;
18691 bfd_vma addend_abs = abs (value);
18692
18693 /* Check that the absolute value of the addend can be
18694 expressed as an 8-bit constant plus a rotation. */
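          /* An ARM data-processing immediate is an 8-bit value rotated right
             by an even amount.  For illustration, an addend of 0x8000 encodes
             as imm8 = 0x80 with a rotation count of 12 (a right rotation by
             24 bits).  */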
18695 encoded_addend = encode_arm_immediate (addend_abs);
18696 if (encoded_addend == (unsigned int) FAIL)
18697 as_bad_where (fixP->fx_file, fixP->fx_line,
18698 _("the offset 0x%08lX is not representable"),
18699 addend_abs);
18700
18701 /* Extract the instruction. */
18702 insn = md_chars_to_number (buf, INSN_SIZE);
18703
18704 /* If the addend is positive, use an ADD instruction.
18705 Otherwise use a SUB. Take care not to destroy the S bit. */
18706 insn &= 0xff1fffff;
18707 if (value < 0)
18708 insn |= 1 << 22;
18709 else
18710 insn |= 1 << 23;
18711
18712 /* Place the encoded addend into the first 12 bits of the
18713 instruction. */
18714 insn &= 0xfffff000;
18715 insn |= encoded_addend;
18716
18717 /* Update the instruction. */
18718 md_number_to_chars (buf, insn, INSN_SIZE);
18719 }
18720 break;
18721
18722 case BFD_RELOC_ARM_LDR_PC_G0:
18723 case BFD_RELOC_ARM_LDR_PC_G1:
18724 case BFD_RELOC_ARM_LDR_PC_G2:
18725 case BFD_RELOC_ARM_LDR_SB_G0:
18726 case BFD_RELOC_ARM_LDR_SB_G1:
18727 case BFD_RELOC_ARM_LDR_SB_G2:
18728 assert (!fixP->fx_done);
18729 if (!seg->use_rela_p)
18730 {
18731 bfd_vma insn;
18732 bfd_vma addend_abs = abs (value);
18733
18734 /* Check that the absolute value of the addend can be
18735 encoded in 12 bits. */
18736 if (addend_abs >= 0x1000)
18737 as_bad_where (fixP->fx_file, fixP->fx_line,
18738 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18739 addend_abs);
18740
18741 /* Extract the instruction. */
18742 insn = md_chars_to_number (buf, INSN_SIZE);
18743
18744 /* If the addend is negative, clear bit 23 of the instruction.
18745 Otherwise set it. */
18746 if (value < 0)
18747 insn &= ~(1 << 23);
18748 else
18749 insn |= 1 << 23;
18750
18751 /* Place the absolute value of the addend into the first 12 bits
18752 of the instruction. */
18753 insn &= 0xfffff000;
18754 insn |= addend_abs;
18755
18756 /* Update the instruction. */
18757 md_number_to_chars (buf, insn, INSN_SIZE);
18758 }
18759 break;
18760
18761 case BFD_RELOC_ARM_LDRS_PC_G0:
18762 case BFD_RELOC_ARM_LDRS_PC_G1:
18763 case BFD_RELOC_ARM_LDRS_PC_G2:
18764 case BFD_RELOC_ARM_LDRS_SB_G0:
18765 case BFD_RELOC_ARM_LDRS_SB_G1:
18766 case BFD_RELOC_ARM_LDRS_SB_G2:
18767 assert (!fixP->fx_done);
18768 if (!seg->use_rela_p)
18769 {
18770 bfd_vma insn;
18771 bfd_vma addend_abs = abs (value);
18772
18773 /* Check that the absolute value of the addend can be
18774 encoded in 8 bits. */
18775 if (addend_abs >= 0x100)
18776 as_bad_where (fixP->fx_file, fixP->fx_line,
18777 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18778 addend_abs);
18779
18780 /* Extract the instruction. */
18781 insn = md_chars_to_number (buf, INSN_SIZE);
18782
18783 /* If the addend is negative, clear bit 23 of the instruction.
18784 Otherwise set it. */
18785 if (value < 0)
18786 insn &= ~(1 << 23);
18787 else
18788 insn |= 1 << 23;
18789
18790 /* Place the first four bits of the absolute value of the addend
18791 into the first 4 bits of the instruction, and the remaining
18792 four into bits 8 .. 11. */
18793 insn &= 0xfffff0f0;
18794 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18795
18796 /* Update the instruction. */
18797 md_number_to_chars (buf, insn, INSN_SIZE);
18798 }
18799 break;
18800
18801 case BFD_RELOC_ARM_LDC_PC_G0:
18802 case BFD_RELOC_ARM_LDC_PC_G1:
18803 case BFD_RELOC_ARM_LDC_PC_G2:
18804 case BFD_RELOC_ARM_LDC_SB_G0:
18805 case BFD_RELOC_ARM_LDC_SB_G1:
18806 case BFD_RELOC_ARM_LDC_SB_G2:
18807 assert (!fixP->fx_done);
18808 if (!seg->use_rela_p)
18809 {
18810 bfd_vma insn;
18811 bfd_vma addend_abs = abs (value);
18812
18813 /* Check that the absolute value of the addend is a multiple of
18814 four and, when divided by four, fits in 8 bits. */
18815 if (addend_abs & 0x3)
18816 as_bad_where (fixP->fx_file, fixP->fx_line,
18817 _("bad offset 0x%08lX (must be word-aligned)"),
18818 addend_abs);
18819
18820 if ((addend_abs >> 2) > 0xff)
18821 as_bad_where (fixP->fx_file, fixP->fx_line,
18822 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18823 addend_abs);
18824
18825 /* Extract the instruction. */
18826 insn = md_chars_to_number (buf, INSN_SIZE);
18827
18828 /* If the addend is negative, clear bit 23 of the instruction.
18829 Otherwise set it. */
18830 if (value < 0)
18831 insn &= ~(1 << 23);
18832 else
18833 insn |= 1 << 23;
18834
18835 /* Place the addend (divided by four) into the first eight
18836 bits of the instruction. */
18837 insn &= 0xfffffff0;
18838 insn |= addend_abs >> 2;
18839
18840 /* Update the instruction. */
18841 md_number_to_chars (buf, insn, INSN_SIZE);
18842 }
18843 break;
18844
18845 case BFD_RELOC_UNUSED:
18846 default:
18847 as_bad_where (fixP->fx_file, fixP->fx_line,
18848 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18849 }
18850 }
18851
18852 /* Translate internal representation of relocation info to BFD target
18853 format. */
18854
18855 arelent *
18856 tc_gen_reloc (asection *section, fixS *fixp)
18857 {
18858 arelent * reloc;
18859 bfd_reloc_code_real_type code;
18860
18861 reloc = xmalloc (sizeof (arelent));
18862
18863 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18864 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18865 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18866
18867 if (fixp->fx_pcrel)
18868 {
18869 if (section->use_rela_p)
18870 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18871 else
18872 fixp->fx_offset = reloc->address;
18873 }
18874 reloc->addend = fixp->fx_offset;
18875
18876 switch (fixp->fx_r_type)
18877 {
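    /* Note that the non-PC-relative BFD_RELOC_8/16/32 and MOVW/MOVT cases
       below deliberately fall through, eventually reaching the group of
       cases that passes fixp->fx_r_type through unchanged.  */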
18878 case BFD_RELOC_8:
18879 if (fixp->fx_pcrel)
18880 {
18881 code = BFD_RELOC_8_PCREL;
18882 break;
18883 }
18884
18885 case BFD_RELOC_16:
18886 if (fixp->fx_pcrel)
18887 {
18888 code = BFD_RELOC_16_PCREL;
18889 break;
18890 }
18891
18892 case BFD_RELOC_32:
18893 if (fixp->fx_pcrel)
18894 {
18895 code = BFD_RELOC_32_PCREL;
18896 break;
18897 }
18898
18899 case BFD_RELOC_ARM_MOVW:
18900 if (fixp->fx_pcrel)
18901 {
18902 code = BFD_RELOC_ARM_MOVW_PCREL;
18903 break;
18904 }
18905
18906 case BFD_RELOC_ARM_MOVT:
18907 if (fixp->fx_pcrel)
18908 {
18909 code = BFD_RELOC_ARM_MOVT_PCREL;
18910 break;
18911 }
18912
18913 case BFD_RELOC_ARM_THUMB_MOVW:
18914 if (fixp->fx_pcrel)
18915 {
18916 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18917 break;
18918 }
18919
18920 case BFD_RELOC_ARM_THUMB_MOVT:
18921 if (fixp->fx_pcrel)
18922 {
18923 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18924 break;
18925 }
18926
18927 case BFD_RELOC_NONE:
18928 case BFD_RELOC_ARM_PCREL_BRANCH:
18929 case BFD_RELOC_ARM_PCREL_BLX:
18930 case BFD_RELOC_RVA:
18931 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18932 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18933 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18934 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18935 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18936 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18937 case BFD_RELOC_THUMB_PCREL_BLX:
18938 case BFD_RELOC_VTABLE_ENTRY:
18939 case BFD_RELOC_VTABLE_INHERIT:
18940 #ifdef TE_PE
18941 case BFD_RELOC_32_SECREL:
18942 #endif
18943 code = fixp->fx_r_type;
18944 break;
18945
18946 case BFD_RELOC_ARM_LITERAL:
18947 case BFD_RELOC_ARM_HWLITERAL:
18948 /* If this is called then a literal has
18949 been referenced across a section boundary. */
18950 as_bad_where (fixp->fx_file, fixp->fx_line,
18951 _("literal referenced across section boundary"));
18952 return NULL;
18953
18954 #ifdef OBJ_ELF
18955 case BFD_RELOC_ARM_GOT32:
18956 case BFD_RELOC_ARM_GOTOFF:
18957 case BFD_RELOC_ARM_PLT32:
18958 case BFD_RELOC_ARM_TARGET1:
18959 case BFD_RELOC_ARM_ROSEGREL32:
18960 case BFD_RELOC_ARM_SBREL32:
18961 case BFD_RELOC_ARM_PREL31:
18962 case BFD_RELOC_ARM_TARGET2:
18963 case BFD_RELOC_ARM_TLS_LE32:
18964 case BFD_RELOC_ARM_TLS_LDO32:
18965 case BFD_RELOC_ARM_PCREL_CALL:
18966 case BFD_RELOC_ARM_PCREL_JUMP:
18967 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18968 case BFD_RELOC_ARM_ALU_PC_G0:
18969 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18970 case BFD_RELOC_ARM_ALU_PC_G1:
18971 case BFD_RELOC_ARM_ALU_PC_G2:
18972 case BFD_RELOC_ARM_LDR_PC_G0:
18973 case BFD_RELOC_ARM_LDR_PC_G1:
18974 case BFD_RELOC_ARM_LDR_PC_G2:
18975 case BFD_RELOC_ARM_LDRS_PC_G0:
18976 case BFD_RELOC_ARM_LDRS_PC_G1:
18977 case BFD_RELOC_ARM_LDRS_PC_G2:
18978 case BFD_RELOC_ARM_LDC_PC_G0:
18979 case BFD_RELOC_ARM_LDC_PC_G1:
18980 case BFD_RELOC_ARM_LDC_PC_G2:
18981 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18982 case BFD_RELOC_ARM_ALU_SB_G0:
18983 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18984 case BFD_RELOC_ARM_ALU_SB_G1:
18985 case BFD_RELOC_ARM_ALU_SB_G2:
18986 case BFD_RELOC_ARM_LDR_SB_G0:
18987 case BFD_RELOC_ARM_LDR_SB_G1:
18988 case BFD_RELOC_ARM_LDR_SB_G2:
18989 case BFD_RELOC_ARM_LDRS_SB_G0:
18990 case BFD_RELOC_ARM_LDRS_SB_G1:
18991 case BFD_RELOC_ARM_LDRS_SB_G2:
18992 case BFD_RELOC_ARM_LDC_SB_G0:
18993 case BFD_RELOC_ARM_LDC_SB_G1:
18994 case BFD_RELOC_ARM_LDC_SB_G2:
18995 code = fixp->fx_r_type;
18996 break;
18997
18998 case BFD_RELOC_ARM_TLS_GD32:
18999 case BFD_RELOC_ARM_TLS_IE32:
19000 case BFD_RELOC_ARM_TLS_LDM32:
19001 /* BFD will include the symbol's address in the addend.
19002 But we don't want that, so subtract it out again here. */
19003 if (!S_IS_COMMON (fixp->fx_addsy))
19004 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19005 code = fixp->fx_r_type;
19006 break;
19007 #endif
19008
19009 case BFD_RELOC_ARM_IMMEDIATE:
19010 as_bad_where (fixp->fx_file, fixp->fx_line,
19011 _("internal relocation (type: IMMEDIATE) not fixed up"));
19012 return NULL;
19013
19014 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19015 as_bad_where (fixp->fx_file, fixp->fx_line,
19016 _("ADRL used for a symbol not defined in the same file"));
19017 return NULL;
19018
19019 case BFD_RELOC_ARM_OFFSET_IMM:
19020 if (section->use_rela_p)
19021 {
19022 code = fixp->fx_r_type;
19023 break;
19024 }
19025
19026 if (fixp->fx_addsy != NULL
19027 && !S_IS_DEFINED (fixp->fx_addsy)
19028 && S_IS_LOCAL (fixp->fx_addsy))
19029 {
19030 as_bad_where (fixp->fx_file, fixp->fx_line,
19031 _("undefined local label `%s'"),
19032 S_GET_NAME (fixp->fx_addsy));
19033 return NULL;
19034 }
19035
19036 as_bad_where (fixp->fx_file, fixp->fx_line,
19037 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19038 return NULL;
19039
19040 default:
19041 {
19042 char * type;
19043
19044 switch (fixp->fx_r_type)
19045 {
19046 case BFD_RELOC_NONE: type = "NONE"; break;
19047 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19048 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19049 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19050 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19051 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19052 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19053 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19054 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19055 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19056 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19057 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19058 default: type = _("<unknown>"); break;
19059 }
19060 as_bad_where (fixp->fx_file, fixp->fx_line,
19061 _("cannot represent %s relocation in this object file format"),
19062 type);
19063 return NULL;
19064 }
19065 }
19066
19067 #ifdef OBJ_ELF
19068 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19069 && GOT_symbol
19070 && fixp->fx_addsy == GOT_symbol)
19071 {
19072 code = BFD_RELOC_ARM_GOTPC;
19073 reloc->addend = fixp->fx_offset = reloc->address;
19074 }
19075 #endif
19076
19077 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19078
19079 if (reloc->howto == NULL)
19080 {
19081 as_bad_where (fixp->fx_file, fixp->fx_line,
19082 _("cannot represent %s relocation in this object file format"),
19083 bfd_get_reloc_code_name (code));
19084 return NULL;
19085 }
19086
19087 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19088 vtable entry to be used in the relocation's section offset. */
19089 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19090 reloc->address = fixp->fx_offset;
19091
19092 return reloc;
19093 }
19094
19095 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19096
19097 void
19098 cons_fix_new_arm (fragS * frag,
19099 int where,
19100 int size,
19101 expressionS * exp)
19102 {
19103 bfd_reloc_code_real_type type;
19104 int pcrel = 0;
19105
19106 /* Pick a reloc.
19107 FIXME: @@ Should look at CPU word size. */
19108 switch (size)
19109 {
19110 case 1:
19111 type = BFD_RELOC_8;
19112 break;
19113 case 2:
19114 type = BFD_RELOC_16;
19115 break;
19116 case 4:
19117 default:
19118 type = BFD_RELOC_32;
19119 break;
19120 case 8:
19121 type = BFD_RELOC_64;
19122 break;
19123 }
19124
19125 #ifdef TE_PE
19126 if (exp->X_op == O_secrel)
19127 {
19128 exp->X_op = O_symbol;
19129 type = BFD_RELOC_32_SECREL;
19130 }
19131 #endif
19132
19133 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19134 }
19135
19136 #if defined OBJ_COFF || defined OBJ_ELF
19137 void
19138 arm_validate_fix (fixS * fixP)
19139 {
19140 /* If the destination of the branch is a defined symbol which does not have
19141 the THUMB_FUNC attribute, then we must be calling a function which has
19142 the (interfacearm) attribute. We look for the Thumb entry point to that
19143 function and change the branch to refer to that function instead. */
19144 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19145 && fixP->fx_addsy != NULL
19146 && S_IS_DEFINED (fixP->fx_addsy)
19147 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19148 {
19149 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19150 }
19151 }
19152 #endif
19153
19154 int
19155 arm_force_relocation (struct fix * fixp)
19156 {
19157 #if defined (OBJ_COFF) && defined (TE_PE)
19158 if (fixp->fx_r_type == BFD_RELOC_RVA)
19159 return 1;
19160 #endif
19161
19162 /* Resolve these relocations even if the symbol is extern or weak. */
19163 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19164 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19165 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19166 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19167 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19168 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19169 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19170 return 0;
19171
19172 /* Always leave these relocations for the linker. */
19173 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19174 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19175 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19176 return 1;
19177
19178 /* Always generate relocations against function symbols. */
19179 if (fixp->fx_r_type == BFD_RELOC_32
19180 && fixp->fx_addsy
19181 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19182 return 1;
19183
19184 return generic_force_reloc (fixp);
19185 }
19186
19187 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19188 /* Relocations against function names must be left unadjusted,
19189 so that the linker can use this information to generate interworking
19190 stubs. The MIPS version of this function
19191 also prevents relocations that are mips-16 specific, but I do not
19192 know why it does this.
19193
19194 FIXME:
19195 There is one other problem that ought to be addressed here, but
19196 which currently is not: Taking the address of a label (rather
19197 than a function) and then later jumping to that address. Such
19198 addresses also ought to have their bottom bit set (assuming that
19199 they reside in Thumb code), but at the moment they will not. */
19200
19201 bfd_boolean
19202 arm_fix_adjustable (fixS * fixP)
19203 {
19204 if (fixP->fx_addsy == NULL)
19205 return 1;
19206
19207 /* Preserve relocations against symbols with function type. */
19208 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19209 return 0;
19210
19211 if (THUMB_IS_FUNC (fixP->fx_addsy)
19212 && fixP->fx_subsy == NULL)
19213 return 0;
19214
19215 /* We need the symbol name for the VTABLE entries. */
19216 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19217 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19218 return 0;
19219
19220 /* Don't allow symbols to be discarded on GOT related relocs. */
19221 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19222 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19223 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19224 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19225 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19226 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19227 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19228 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19229 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19230 return 0;
19231
19232 /* Similarly for group relocations. */
19233 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19234 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19235 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19236 return 0;
19237
19238 return 1;
19239 }
19240 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19241
19242 #ifdef OBJ_ELF
19243
19244 const char *
19245 elf32_arm_target_format (void)
19246 {
19247 #ifdef TE_SYMBIAN
19248 return (target_big_endian
19249 ? "elf32-bigarm-symbian"
19250 : "elf32-littlearm-symbian");
19251 #elif defined (TE_VXWORKS)
19252 return (target_big_endian
19253 ? "elf32-bigarm-vxworks"
19254 : "elf32-littlearm-vxworks");
19255 #else
19256 if (target_big_endian)
19257 return "elf32-bigarm";
19258 else
19259 return "elf32-littlearm";
19260 #endif
19261 }
19262
19263 void
19264 armelf_frob_symbol (symbolS * symp,
19265 int * puntp)
19266 {
19267 elf_frob_symbol (symp, puntp);
19268 }
19269 #endif
19270
19271 /* MD interface: Finalization. */
19272
19273 /* A good place to dump any pending literal pools, although the cleanup
19274 hook was probably not intended for this kind of use.  We need to dump the
19275 literal pool before references are made to a null symbol pointer. */
19276
19277 void
19278 arm_cleanup (void)
19279 {
19280 literal_pool * pool;
19281
19282 for (pool = list_of_pools; pool; pool = pool->next)
19283 {
19284 /* Put it at the end of the relevant section. */
19285 subseg_set (pool->section, pool->sub_section);
19286 #ifdef OBJ_ELF
19287 arm_elf_change_section ();
19288 #endif
19289 s_ltorg (0);
19290 }
19291 }
19292
19293 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19294 ARM ones. */
19295
19296 void
19297 arm_adjust_symtab (void)
19298 {
19299 #ifdef OBJ_COFF
19300 symbolS * sym;
19301
19302 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19303 {
19304 if (ARM_IS_THUMB (sym))
19305 {
19306 if (THUMB_IS_FUNC (sym))
19307 {
19308 /* Mark the symbol as a Thumb function. */
19309 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19310 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19311 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19312
19313 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19314 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19315 else
19316 as_bad (_("%s: unexpected function type: %d"),
19317 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19318 }
19319 else switch (S_GET_STORAGE_CLASS (sym))
19320 {
19321 case C_EXT:
19322 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19323 break;
19324 case C_STAT:
19325 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19326 break;
19327 case C_LABEL:
19328 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19329 break;
19330 default:
19331 /* Do nothing. */
19332 break;
19333 }
19334 }
19335
19336 if (ARM_IS_INTERWORK (sym))
19337 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19338 }
19339 #endif
19340 #ifdef OBJ_ELF
19341 symbolS * sym;
19342 char bind;
19343
19344 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19345 {
19346 if (ARM_IS_THUMB (sym))
19347 {
19348 elf_symbol_type * elf_sym;
19349
19350 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19351 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19352
19353 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19354 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19355 {
19356 /* If it's a .thumb_func, declare it as so,
19357 otherwise tag label as .code 16. */
19358 if (THUMB_IS_FUNC (sym))
19359 elf_sym->internal_elf_sym.st_info =
19360 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19361 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19362 elf_sym->internal_elf_sym.st_info =
19363 ELF_ST_INFO (bind, STT_ARM_16BIT);
19364 }
19365 }
19366 }
19367 #endif
19368 }
19369
19370 /* MD interface: Initialization. */
19371
19372 static void
19373 set_constant_flonums (void)
19374 {
19375 int i;
19376
19377 for (i = 0; i < NUM_FLOAT_VALS; i++)
19378 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19379 abort ();
19380 }
19381
19382 /* Auto-select Thumb mode if it's the only available instruction set for the
19383 given architecture. */
19384
19385 static void
19386 autoselect_thumb_from_cpu_variant (void)
19387 {
19388 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19389 opcode_select (16);
19390 }
19391
19392 void
19393 md_begin (void)
19394 {
19395 unsigned mach;
19396 unsigned int i;
19397
19398 if ( (arm_ops_hsh = hash_new ()) == NULL
19399 || (arm_cond_hsh = hash_new ()) == NULL
19400 || (arm_shift_hsh = hash_new ()) == NULL
19401 || (arm_psr_hsh = hash_new ()) == NULL
19402 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19403 || (arm_reg_hsh = hash_new ()) == NULL
19404 || (arm_reloc_hsh = hash_new ()) == NULL
19405 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19406 as_fatal (_("virtual memory exhausted"));
19407
19408 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19409 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19410 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19411 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19412 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19413 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19414 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19415 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19416 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19417 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19418 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19419 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19420 for (i = 0;
19421 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19422 i++)
19423 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19424 (PTR) (barrier_opt_names + i));
19425 #ifdef OBJ_ELF
19426 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19427 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19428 #endif
19429
19430 set_constant_flonums ();
19431
19432 /* Set the cpu variant based on the command-line options. We prefer
19433 -mcpu= over -march= if both are set (as for GCC); and we prefer
19434 -mfpu= over any other way of setting the floating point unit.
19435 Use of legacy options with new options is faulted. */
19436 if (legacy_cpu)
19437 {
19438 if (mcpu_cpu_opt || march_cpu_opt)
19439 as_bad (_("use of old and new-style options to set CPU type"));
19440
19441 mcpu_cpu_opt = legacy_cpu;
19442 }
19443 else if (!mcpu_cpu_opt)
19444 mcpu_cpu_opt = march_cpu_opt;
19445
19446 if (legacy_fpu)
19447 {
19448 if (mfpu_opt)
19449 as_bad (_("use of old and new-style options to set FPU type"));
19450
19451 mfpu_opt = legacy_fpu;
19452 }
19453 else if (!mfpu_opt)
19454 {
19455 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19456 /* Some environments specify a default FPU. If they don't, infer it
19457 from the processor. */
19458 if (mcpu_fpu_opt)
19459 mfpu_opt = mcpu_fpu_opt;
19460 else
19461 mfpu_opt = march_fpu_opt;
19462 #else
19463 mfpu_opt = &fpu_default;
19464 #endif
19465 }
19466
19467 if (!mfpu_opt)
19468 {
19469 if (mcpu_cpu_opt != NULL)
19470 mfpu_opt = &fpu_default;
19471 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19472 mfpu_opt = &fpu_arch_vfp_v2;
19473 else
19474 mfpu_opt = &fpu_arch_fpa;
19475 }
19476
19477 #ifdef CPU_DEFAULT
19478 if (!mcpu_cpu_opt)
19479 {
19480 mcpu_cpu_opt = &cpu_default;
19481 selected_cpu = cpu_default;
19482 }
19483 #else
19484 if (mcpu_cpu_opt)
19485 selected_cpu = *mcpu_cpu_opt;
19486 else
19487 mcpu_cpu_opt = &arm_arch_any;
19488 #endif
19489
19490 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19491
19492 autoselect_thumb_from_cpu_variant ();
19493
19494 arm_arch_used = thumb_arch_used = arm_arch_none;
19495
19496 #if defined OBJ_COFF || defined OBJ_ELF
19497 {
19498 unsigned int flags = 0;
19499
19500 #if defined OBJ_ELF
19501 flags = meabi_flags;
19502
19503 switch (meabi_flags)
19504 {
19505 case EF_ARM_EABI_UNKNOWN:
19506 #endif
19507 /* Set the flags in the private structure. */
19508 if (uses_apcs_26) flags |= F_APCS26;
19509 if (support_interwork) flags |= F_INTERWORK;
19510 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19511 if (pic_code) flags |= F_PIC;
19512 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19513 flags |= F_SOFT_FLOAT;
19514
19515 switch (mfloat_abi_opt)
19516 {
19517 case ARM_FLOAT_ABI_SOFT:
19518 case ARM_FLOAT_ABI_SOFTFP:
19519 flags |= F_SOFT_FLOAT;
19520 break;
19521
19522 case ARM_FLOAT_ABI_HARD:
19523 if (flags & F_SOFT_FLOAT)
19524 as_bad (_("hard-float conflicts with specified fpu"));
19525 break;
19526 }
19527
19528 /* Using pure-endian doubles (even if soft-float). */
19529 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19530 flags |= F_VFP_FLOAT;
19531
19532 #if defined OBJ_ELF
19533 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19534 flags |= EF_ARM_MAVERICK_FLOAT;
19535 break;
19536
19537 case EF_ARM_EABI_VER4:
19538 case EF_ARM_EABI_VER5:
19539 /* No additional flags to set. */
19540 break;
19541
19542 default:
19543 abort ();
19544 }
19545 #endif
19546 bfd_set_private_flags (stdoutput, flags);
19547
19548 /* We have run out of flags in the COFF header to encode the
19549 status of ATPCS support, so instead we create a dummy,
19550 empty, debug section called .arm.atpcs. */
19551 if (atpcs)
19552 {
19553 asection * sec;
19554
19555 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19556
19557 if (sec != NULL)
19558 {
19559 bfd_set_section_flags
19560 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19561 bfd_set_section_size (stdoutput, sec, 0);
19562 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19563 }
19564 }
19565 }
19566 #endif
19567
19568 /* Record the CPU type as well. */
19569 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19570 mach = bfd_mach_arm_iWMMXt2;
19571 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19572 mach = bfd_mach_arm_iWMMXt;
19573 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19574 mach = bfd_mach_arm_XScale;
19575 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19576 mach = bfd_mach_arm_ep9312;
19577 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19578 mach = bfd_mach_arm_5TE;
19579 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19580 {
19581 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19582 mach = bfd_mach_arm_5T;
19583 else
19584 mach = bfd_mach_arm_5;
19585 }
19586 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19587 {
19588 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19589 mach = bfd_mach_arm_4T;
19590 else
19591 mach = bfd_mach_arm_4;
19592 }
19593 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19594 mach = bfd_mach_arm_3M;
19595 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19596 mach = bfd_mach_arm_3;
19597 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19598 mach = bfd_mach_arm_2a;
19599 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19600 mach = bfd_mach_arm_2;
19601 else
19602 mach = bfd_mach_arm_unknown;
19603
19604 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19605 }
19606
19607 /* Command line processing. */
19608
19609 /* md_parse_option
19610 Invocation line includes a switch not recognized by the base assembler.
19611 See if it's a processor-specific option.
19612
19613 This routine is somewhat complicated by the need for backwards
19614 compatibility (since older releases of gcc can't be changed).
19615 The new options try to make the interface as compatible as
19616 possible with GCC.
19617
19618 New options (supported) are:
19619
19620 -mcpu=<cpu name> Assemble for selected processor
19621 -march=<architecture name> Assemble for selected architecture
19622 -mfpu=<fpu architecture> Assemble for selected FPU.
19623 -EB/-mbig-endian Big-endian
19624 -EL/-mlittle-endian Little-endian
19625 -k Generate PIC code
19626 -mthumb Start in Thumb mode
19627 -mthumb-interwork Code supports ARM/Thumb interworking
19628
19629 For now we will also provide support for:
19630
19631 -mapcs-32 32-bit Program counter
19632 -mapcs-26 26-bit Program counter
19633 -mapcs-float Floats passed in FP registers
19634 -mapcs-reentrant Reentrant code
19635 -matpcs
19636 (sometime these will probably be replaced with -mapcs=<list of options>
19637 and -matpcs=<list of options>)
19638
19639 The remaining options are only supported for backwards compatibility.
19640 Cpu variants, the arm part is optional:
19641 -m[arm]1 Currently not supported.
19642 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19643 -m[arm]3 Arm 3 processor
19644 -m[arm]6[xx], Arm 6 processors
19645 -m[arm]7[xx][t][[d]m] Arm 7 processors
19646 -m[arm]8[10] Arm 8 processors
19647 -m[arm]9[20][tdmi] Arm 9 processors
19648 -mstrongarm[110[0]] StrongARM processors
19649 -mxscale XScale processors
19650 -m[arm]v[2345[t[e]]] Arm architectures
19651 -mall All (except the ARM1)
19652 FP variants:
19653 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19654 -mfpe-old (No float load/store multiples)
19655 -mvfpxd VFP Single precision
19656 -mvfp All VFP
19657 -mno-fpu Disable all floating point instructions
19658
19659 The following CPU names are recognized:
19660 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19661 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19662 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19663 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19664 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19665 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19666 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19667
19668 */
19669
19670 const char * md_shortopts = "m:k";
19671
19672 #ifdef ARM_BI_ENDIAN
19673 #define OPTION_EB (OPTION_MD_BASE + 0)
19674 #define OPTION_EL (OPTION_MD_BASE + 1)
19675 #else
19676 #if TARGET_BYTES_BIG_ENDIAN
19677 #define OPTION_EB (OPTION_MD_BASE + 0)
19678 #else
19679 #define OPTION_EL (OPTION_MD_BASE + 1)
19680 #endif
19681 #endif
19682
19683 struct option md_longopts[] =
19684 {
19685 #ifdef OPTION_EB
19686 {"EB", no_argument, NULL, OPTION_EB},
19687 #endif
19688 #ifdef OPTION_EL
19689 {"EL", no_argument, NULL, OPTION_EL},
19690 #endif
19691 {NULL, no_argument, NULL, 0}
19692 };
19693
19694 size_t md_longopts_size = sizeof (md_longopts);
19695
19696 struct arm_option_table
19697 {
19698 char *option; /* Option name to match. */
19699 char *help; /* Help information. */
19700 int *var; /* Variable to change. */
19701 int value; /* What to change it to. */
19702 char *deprecated; /* If non-null, print this message. */
19703 };
19704
19705 struct arm_option_table arm_opts[] =
19706 {
19707 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19708 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19709 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19710 &support_interwork, 1, NULL},
19711 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19712 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19713 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19714 1, NULL},
19715 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19716 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19717 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19718 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19719 NULL},
19720
19721 /* These are recognized by the assembler, but have no effect on code. */
19722 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19723 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19724 {NULL, NULL, NULL, 0, NULL}
19725 };
19726
19727 struct arm_legacy_option_table
19728 {
19729 char *option; /* Option name to match. */
19730 const arm_feature_set **var; /* Variable to change. */
19731 const arm_feature_set value; /* What to change it to. */
19732 char *deprecated; /* If non-null, print this message. */
19733 };
19734
19735 const struct arm_legacy_option_table arm_legacy_opts[] =
19736 {
19737 /* DON'T add any new processors to this list -- we want the whole list
19738 to go away... Add them to the processors table instead. */
19739 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19740 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19741 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19742 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19743 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19744 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19745 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19746 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19747 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19748 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19749 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19750 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19751 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19752 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19753 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19754 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19755 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19756 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19757 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19758 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19759 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19760 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19761 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19762 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19763 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19764 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19765 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19766 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19767 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19768 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19769 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19770 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19771 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19772 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19773 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19774 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19775 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19776 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19777 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19778 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19779 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19780 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19781 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19782 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19783 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19784 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19785 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19786 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19787 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19788 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19789 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19790 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19791 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19792 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19793 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19794 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19795 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19796 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19797 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19798 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19799 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19800 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19801 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19802 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19803 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19804 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19805 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19806 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19807 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19808 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19809 N_("use -mcpu=strongarm110")},
19810 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19811 N_("use -mcpu=strongarm1100")},
19812 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19813 N_("use -mcpu=strongarm1110")},
19814 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19815 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19816 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19817
19818 /* Architecture variants -- don't add any more to this list either. */
19819 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19820 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19821 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19822 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19823 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19824 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19825 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19826 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19827 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19828 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19829 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19830 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19831 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19832 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19833 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19834 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19835 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19836 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19837
19838 /* Floating point variants -- don't add any more to this list either. */
19839 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19840 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19841 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19842 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19843 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19844
19845 {NULL, NULL, ARM_ARCH_NONE, NULL}
19846 };
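/* For example, the legacy flag "-m7tdmi" points legacy_cpu at the
   ARM_ARCH_V4T feature set and, because the entry carries a deprecation
   string, md_parse_option below prints "option `-m7tdmi' is deprecated:
   use -mcpu=arm7tdmi" via as_tsktsk.  */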
19847
19848 struct arm_cpu_option_table
19849 {
19850 char *name;
19851 const arm_feature_set value;
19852 /* For some CPUs we assume an FPU unless the user explicitly sets
19853 -mfpu=... */
19854 const arm_feature_set default_fpu;
19855 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19856 case. */
19857 const char *canonical_name;
19858 };
19859
19860 /* This list should, at a minimum, contain all the cpu names
19861 recognized by GCC. */
19862 static const struct arm_cpu_option_table arm_cpus[] =
19863 {
19864 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19865 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19866 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19867 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19868 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19869 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19870 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19871 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19872 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19873 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19874 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19875 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19876 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19877 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19878 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19879 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19880 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19881 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19882 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19883 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19884 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19885 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19886 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19887 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19888 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19889 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19890 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19891 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19892 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19893 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19894 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19895 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19896 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19897 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19898 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19899 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19900 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19901 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19902 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19903 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19904 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19905 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19906 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19907 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19908 /* For V5 or later processors we default to using VFP; but the user
19909 should really set the FPU type explicitly. */
19910 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19911 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19912 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19913 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19914 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19915 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19916 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19917 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19918 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19919 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19920 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19921 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19922 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19923 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19924 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19925 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19926 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19927 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19928 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19929 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19930 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19931 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19932 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19933 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19934 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19935 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19936 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19937 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19938 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19939 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19940 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19941 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19942 | FPU_NEON_EXT_V1),
19943 NULL},
19944 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19945 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19946 /* ??? XSCALE is really an architecture. */
19947 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19948 /* ??? iwmmxt is not a processor. */
19949 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19950   {"iwmmxt2",		ARM_ARCH_IWMMXT2, FPU_ARCH_VFP_V2, NULL},
19951 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19952 /* Maverick */
19953 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19954 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19955 };
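/* For example, "-mcpu=arm926ej-s" selects the ARM_ARCH_V5TEJ feature set
   with FPU_ARCH_VFP_V2 as the default FPU; since that entry has no
   canonical_name, arm_parse_cpu below reports the name uppercased as
   "ARM926EJ-S".  */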
19956
19957 struct arm_arch_option_table
19958 {
19959 char *name;
19960 const arm_feature_set value;
19961 const arm_feature_set default_fpu;
19962 };
19963
19964 /* This list should, at a minimum, contain all the architecture names
19965 recognized by GCC. */
19966 static const struct arm_arch_option_table arm_archs[] =
19967 {
19968 {"all", ARM_ANY, FPU_ARCH_FPA},
19969 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19970 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19971 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19972 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
19973 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
19974 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
19975 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
19976 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
19977 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
19978 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
19979 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
19980 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
19981 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
19982 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
19983 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
19984 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
19985 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
19986 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
19987 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
19988 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
19989 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
19990 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
19991 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
19992 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
19993 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
19994 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
19995 /* The official spelling of the ARMv7 profile variants is the dashed form.
19996 Accept the non-dashed form for compatibility with old toolchains. */
19997 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19998 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19999 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20000 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20001 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20002 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20003 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
20004 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
20005   {"iwmmxt2",		ARM_ARCH_IWMMXT2, FPU_ARCH_VFP},
20006 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
20007 };
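/* For example, "-march=armv5te" selects the ARM_ARCH_V5TE feature set with
   FPU_ARCH_VFP as the default FPU; arm_parse_arch below copies the table
   name verbatim into selected_cpu_name.  */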
20008
20009 /* ISA extensions in the co-processor space. */
20010 struct arm_option_cpu_value_table
20011 {
20012 char *name;
20013 const arm_feature_set value;
20014 };
20015
20016 static const struct arm_option_cpu_value_table arm_extensions[] =
20017 {
20018 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20019 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20020 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20021 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20022 {NULL, ARM_ARCH_NONE}
20023 };
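/* Extensions are written as "+name" suffixes on a -mcpu= or -march=
   value, e.g. "-mcpu=xscale+iwmmxt"; arm_parse_extension below merges the
   named feature bits into the selected CPU feature set.  */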
20024
20025 /* This list should, at a minimum, contain all the fpu names
20026 recognized by GCC. */
20027 static const struct arm_option_cpu_value_table arm_fpus[] =
20028 {
20029 {"softfpa", FPU_NONE},
20030 {"fpe", FPU_ARCH_FPE},
20031 {"fpe2", FPU_ARCH_FPE},
20032 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20033 {"fpa", FPU_ARCH_FPA},
20034 {"fpa10", FPU_ARCH_FPA},
20035 {"fpa11", FPU_ARCH_FPA},
20036 {"arm7500fe", FPU_ARCH_FPA},
20037 {"softvfp", FPU_ARCH_VFP},
20038 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20039 {"vfp", FPU_ARCH_VFP_V2},
20040 {"vfp9", FPU_ARCH_VFP_V2},
20041 {"vfp3", FPU_ARCH_VFP_V3},
20042 {"vfp10", FPU_ARCH_VFP_V2},
20043 {"vfp10-r0", FPU_ARCH_VFP_V1},
20044 {"vfpxd", FPU_ARCH_VFP_V1xD},
20045 {"arm1020t", FPU_ARCH_VFP_V1},
20046 {"arm1020e", FPU_ARCH_VFP_V2},
20047 {"arm1136jfs", FPU_ARCH_VFP_V2},
20048 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20049 {"maverick", FPU_ARCH_MAVERICK},
20050 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20051 {NULL, ARM_ARCH_NONE}
20052 };
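/* For example, "-mfpu=vfp" selects FPU_ARCH_VFP_V2 and "-mfpu=neon"
   selects FPU_ARCH_VFP_V3_PLUS_NEON_V1; see arm_parse_fpu below.  */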
20053
20054 struct arm_option_value_table
20055 {
20056 char *name;
20057 long value;
20058 };
20059
20060 static const struct arm_option_value_table arm_float_abis[] =
20061 {
20062 {"hard", ARM_FLOAT_ABI_HARD},
20063 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20064 {"soft", ARM_FLOAT_ABI_SOFT},
20065 {NULL, 0}
20066 };
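/* For example, "-mfloat-abi=softfp" sets mfloat_abi_opt to
   ARM_FLOAT_ABI_SOFTFP; see arm_parse_float_abi below.  */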
20067
20068 #ifdef OBJ_ELF
20069 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20070 static const struct arm_option_value_table arm_eabis[] =
20071 {
20072 {"gnu", EF_ARM_EABI_UNKNOWN},
20073 {"4", EF_ARM_EABI_VER4},
20074 {"5", EF_ARM_EABI_VER5},
20075 {NULL, 0}
20076 };
20077 #endif
20078
20079 struct arm_long_option_table
20080 {
20081 char * option; /* Substring to match. */
20082 char * help; /* Help information. */
20083 int (* func) (char * subopt); /* Function to decode sub-option. */
20084 char * deprecated; /* If non-null, print this message. */
20085 };
20086
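/* Parse a "+ext[+ext...]" list of architectural extensions in STR and
   merge the corresponding feature bits from arm_extensions into a fresh
   copy of *OPT_P, leaving *OPT_P pointing at that copy.  Returns 1 on
   success, 0 (after reporting an error) on failure.  */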
20087 static int
20088 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20089 {
20090 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20091
20092 /* Copy the feature set, so that we can modify it. */
20093 *ext_set = **opt_p;
20094 *opt_p = ext_set;
20095
20096 while (str != NULL && *str != 0)
20097 {
20098 const struct arm_option_cpu_value_table * opt;
20099 char * ext;
20100 int optlen;
20101
20102 if (*str != '+')
20103 {
20104 as_bad (_("invalid architectural extension"));
20105 return 0;
20106 }
20107
20108 str++;
20109 ext = strchr (str, '+');
20110
20111 if (ext != NULL)
20112 optlen = ext - str;
20113 else
20114 optlen = strlen (str);
20115
20116 if (optlen == 0)
20117 {
20118 as_bad (_("missing architectural extension"));
20119 return 0;
20120 }
20121
20122 for (opt = arm_extensions; opt->name != NULL; opt++)
20123 if (strncmp (opt->name, str, optlen) == 0)
20124 {
20125 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20126 break;
20127 }
20128
20129 if (opt->name == NULL)
20130 {
20131 	  as_bad (_("unknown architectural extension `%s'"), str);
20132 return 0;
20133 }
20134
20135 str = ext;
20136     }
20137
20138 return 1;
20139 }
20140
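/* Handle the -mcpu= option: STR is a CPU name from arm_cpus, optionally
   followed by "+ext" architectural extensions.  */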
20141 static int
20142 arm_parse_cpu (char * str)
20143 {
20144 const struct arm_cpu_option_table * opt;
20145 char * ext = strchr (str, '+');
20146 int optlen;
20147
20148 if (ext != NULL)
20149 optlen = ext - str;
20150 else
20151 optlen = strlen (str);
20152
20153 if (optlen == 0)
20154 {
20155 as_bad (_("missing cpu name `%s'"), str);
20156 return 0;
20157 }
20158
20159 for (opt = arm_cpus; opt->name != NULL; opt++)
20160 if (strncmp (opt->name, str, optlen) == 0)
20161 {
20162 mcpu_cpu_opt = &opt->value;
20163 mcpu_fpu_opt = &opt->default_fpu;
20164 if (opt->canonical_name)
20165 	  strcpy (selected_cpu_name, opt->canonical_name);
20166 else
20167 {
20168 int i;
20169 for (i = 0; i < optlen; i++)
20170 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20171 selected_cpu_name[i] = 0;
20172 }
20173
20174 if (ext != NULL)
20175 return arm_parse_extension (ext, &mcpu_cpu_opt);
20176
20177 return 1;
20178 }
20179
20180 as_bad (_("unknown cpu `%s'"), str);
20181 return 0;
20182 }
20183
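/* Handle the -march= option: STR is an architecture name from arm_archs,
   optionally followed by "+ext" architectural extensions.  */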
20184 static int
20185 arm_parse_arch (char * str)
20186 {
20187 const struct arm_arch_option_table *opt;
20188 char *ext = strchr (str, '+');
20189 int optlen;
20190
20191 if (ext != NULL)
20192 optlen = ext - str;
20193 else
20194 optlen = strlen (str);
20195
20196 if (optlen == 0)
20197 {
20198 as_bad (_("missing architecture name `%s'"), str);
20199 return 0;
20200 }
20201
20202 for (opt = arm_archs; opt->name != NULL; opt++)
20203     if (strncmp (opt->name, str, optlen) == 0)
20204 {
20205 march_cpu_opt = &opt->value;
20206 march_fpu_opt = &opt->default_fpu;
20207 	strcpy (selected_cpu_name, opt->name);
20208
20209 if (ext != NULL)
20210 return arm_parse_extension (ext, &march_cpu_opt);
20211
20212 return 1;
20213 }
20214
20215 as_bad (_("unknown architecture `%s'\n"), str);
20216 return 0;
20217 }
20218
20219 static int
20220 arm_parse_fpu (char * str)
20221 {
20222 const struct arm_option_cpu_value_table * opt;
20223
20224 for (opt = arm_fpus; opt->name != NULL; opt++)
20225 if (streq (opt->name, str))
20226 {
20227 mfpu_opt = &opt->value;
20228 return 1;
20229 }
20230
20231 as_bad (_("unknown floating point format `%s'\n"), str);
20232 return 0;
20233 }
20234
20235 static int
20236 arm_parse_float_abi (char * str)
20237 {
20238 const struct arm_option_value_table * opt;
20239
20240 for (opt = arm_float_abis; opt->name != NULL; opt++)
20241 if (streq (opt->name, str))
20242 {
20243 mfloat_abi_opt = opt->value;
20244 return 1;
20245 }
20246
20247 as_bad (_("unknown floating point abi `%s'\n"), str);
20248 return 0;
20249 }
20250
20251 #ifdef OBJ_ELF
20252 static int
20253 arm_parse_eabi (char * str)
20254 {
20255 const struct arm_option_value_table *opt;
20256
20257 for (opt = arm_eabis; opt->name != NULL; opt++)
20258 if (streq (opt->name, str))
20259 {
20260 meabi_flags = opt->value;
20261 return 1;
20262 }
20263 as_bad (_("unknown EABI `%s'\n"), str);
20264 return 0;
20265 }
20266 #endif
20267
20268 struct arm_long_option_table arm_long_opts[] =
20269 {
20270 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20271 arm_parse_cpu, NULL},
20272 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20273 arm_parse_arch, NULL},
20274 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20275 arm_parse_fpu, NULL},
20276 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20277 arm_parse_float_abi, NULL},
20278 #ifdef OBJ_ELF
20279 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20280 arm_parse_eabi, NULL},
20281 #endif
20282 {NULL, NULL, 0, NULL}
20283 };
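/* A worked example of the dispatch in md_parse_option below: it handles
   "-mcpu=arm7tdmi" as c == 'm' with arg == "cpu=arm7tdmi"; the "mcpu="
   entry above matches, and arm_parse_cpu receives the remainder
   "arm7tdmi".  */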
20284
20285 int
20286 md_parse_option (int c, char * arg)
20287 {
20288 struct arm_option_table *opt;
20289 const struct arm_legacy_option_table *fopt;
20290 struct arm_long_option_table *lopt;
20291
20292 switch (c)
20293 {
20294 #ifdef OPTION_EB
20295 case OPTION_EB:
20296 target_big_endian = 1;
20297 break;
20298 #endif
20299
20300 #ifdef OPTION_EL
20301 case OPTION_EL:
20302 target_big_endian = 0;
20303 break;
20304 #endif
20305
20306 case 'a':
20307       /* Listing option.  Just ignore these; we don't support additional
20308 	 ones.  */
20309 return 0;
20310
20311 default:
20312 for (opt = arm_opts; opt->option != NULL; opt++)
20313 {
20314 if (c == opt->option[0]
20315 && ((arg == NULL && opt->option[1] == 0)
20316 || streq (arg, opt->option + 1)))
20317 {
20318 #if WARN_DEPRECATED
20319 /* If the option is deprecated, tell the user. */
20320 if (opt->deprecated != NULL)
20321 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20322 arg ? arg : "", _(opt->deprecated));
20323 #endif
20324
20325 if (opt->var != NULL)
20326 *opt->var = opt->value;
20327
20328 return 1;
20329 }
20330 }
20331
20332 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20333 {
20334 if (c == fopt->option[0]
20335 && ((arg == NULL && fopt->option[1] == 0)
20336 || streq (arg, fopt->option + 1)))
20337 {
20338 #if WARN_DEPRECATED
20339 /* If the option is deprecated, tell the user. */
20340 if (fopt->deprecated != NULL)
20341 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20342 arg ? arg : "", _(fopt->deprecated));
20343 #endif
20344
20345 if (fopt->var != NULL)
20346 *fopt->var = &fopt->value;
20347
20348 return 1;
20349 }
20350 }
20351
20352 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20353 {
20354 /* These options are expected to have an argument. */
20355 if (c == lopt->option[0]
20356 && arg != NULL
20357 && strncmp (arg, lopt->option + 1,
20358 strlen (lopt->option + 1)) == 0)
20359 {
20360 #if WARN_DEPRECATED
20361 /* If the option is deprecated, tell the user. */
20362 if (lopt->deprecated != NULL)
20363 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20364 _(lopt->deprecated));
20365 #endif
20366
20367 	      /* Call the sub-option parser. */
20368 return lopt->func (arg + strlen (lopt->option) - 1);
20369 }
20370 }
20371
20372 return 0;
20373 }
20374
20375 return 1;
20376 }
20377
20378 void
20379 md_show_usage (FILE * fp)
20380 {
20381 struct arm_option_table *opt;
20382 struct arm_long_option_table *lopt;
20383
20384 fprintf (fp, _(" ARM-specific assembler options:\n"));
20385
20386 for (opt = arm_opts; opt->option != NULL; opt++)
20387 if (opt->help != NULL)
20388 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20389
20390 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20391 if (lopt->help != NULL)
20392 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20393
20394 #ifdef OPTION_EB
20395 fprintf (fp, _("\
20396 -EB assemble code for a big-endian cpu\n"));
20397 #endif
20398
20399 #ifdef OPTION_EL
20400 fprintf (fp, _("\
20401 -EL assemble code for a little-endian cpu\n"));
20402 #endif
20403 }
20404
20405
20406 #ifdef OBJ_ELF
20407 typedef struct
20408 {
20409 int val;
20410 arm_feature_set flags;
20411 } cpu_arch_ver_table;
20412
20413 /* Mapping from CPU features to EABI CPU arch values.  The table must be
20414    sorted so that entries with the fewest features come first.  */
20415 static const cpu_arch_ver_table cpu_arch_ver[] =
20416 {
20417 {1, ARM_ARCH_V4},
20418 {2, ARM_ARCH_V4T},
20419 {3, ARM_ARCH_V5},
20420 {4, ARM_ARCH_V5TE},
20421 {5, ARM_ARCH_V5TEJ},
20422 {6, ARM_ARCH_V6},
20423 {7, ARM_ARCH_V6Z},
20424 {8, ARM_ARCH_V6K},
20425 {9, ARM_ARCH_V6T2},
20426 {10, ARM_ARCH_V7A},
20427 {10, ARM_ARCH_V7R},
20428 {10, ARM_ARCH_V7M},
20429 {0, ARM_ARCH_NONE}
20430 };
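/* For example, a module that only requires ARMv4T features ends up with a
   Tag_CPU_arch value of 2, the value paired with ARM_ARCH_V4T above; the
   scan in aeabi_set_public_attributes below keeps the last (richest)
   matching entry.  */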
20431
20432 /* Set the public EABI object attributes. */
20433 static void
20434 aeabi_set_public_attributes (void)
20435 {
20436 int arch;
20437 arm_feature_set flags;
20438 arm_feature_set tmp;
20439 const cpu_arch_ver_table *p;
20440
20441 /* Choose the architecture based on the capabilities of the requested cpu
20442 (if any) and/or the instructions actually used. */
20443 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20444 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20445 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20446   /* Allow the user to override the reported architecture.  */
20447 if (object_arch)
20448 {
20449 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20450 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20451 }
20452
20453 tmp = flags;
20454 arch = 0;
20455 for (p = cpu_arch_ver; p->val; p++)
20456 {
20457 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20458 {
20459 arch = p->val;
20460 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20461 }
20462 }
20463
20464 /* Tag_CPU_name. */
20465 if (selected_cpu_name[0])
20466 {
20467 char *p;
20468
20469 p = selected_cpu_name;
20470       if (strncmp (p, "armv", 4) == 0)
20471 {
20472 int i;
20473
20474 p += 4;
20475 for (i = 0; p[i]; i++)
20476 p[i] = TOUPPER (p[i]);
20477 }
20478 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20479 }
20480 /* Tag_CPU_arch. */
20481 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20482 /* Tag_CPU_arch_profile. */
20483 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20484 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20485 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20486 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20487 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20488 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20489 /* Tag_ARM_ISA_use. */
20490 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20491 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20492 /* Tag_THUMB_ISA_use. */
20493 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20494 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20495 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20496 /* Tag_VFP_arch. */
20497 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20498 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20499 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20500 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20501 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20502 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20503 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20504 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20505 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20506 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20507 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20508 /* Tag_WMMX_arch. */
20509 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20510 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20511 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20512 /* Tag_NEON_arch. */
20513 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20514 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20515 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20516 }
20517
20518 /* Add the .ARM.attributes section. */
20519 void
20520 arm_md_end (void)
20521 {
20522 segT s;
20523 char *p;
20524 addressT addr;
20525 offsetT size;
20526
20527 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20528 return;
20529
20530 aeabi_set_public_attributes ();
20531 size = elf32_arm_eabi_attr_size (stdoutput);
20532 s = subseg_new (".ARM.attributes", 0);
20533 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20534 addr = frag_now_fix ();
20535 p = frag_more (size);
20536 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20537 }
20538 #endif /* OBJ_ELF */
20539
20540
20541 /* Parse a .cpu directive. */
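/* For example, ".cpu arm926ej-s" selects that entry from the arm_cpus
   table above and recomputes cpu_variant with the current FPU setting.  */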
20542
20543 static void
20544 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20545 {
20546 const struct arm_cpu_option_table *opt;
20547 char *name;
20548 char saved_char;
20549
20550 name = input_line_pointer;
20551 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20552 input_line_pointer++;
20553 saved_char = *input_line_pointer;
20554 *input_line_pointer = 0;
20555
20556 /* Skip the first "all" entry. */
20557 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20558 if (streq (opt->name, name))
20559 {
20560 mcpu_cpu_opt = &opt->value;
20561 selected_cpu = opt->value;
20562 if (opt->canonical_name)
20563 	  strcpy (selected_cpu_name, opt->canonical_name);
20564 else
20565 {
20566 int i;
20567 for (i = 0; opt->name[i]; i++)
20568 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20569 selected_cpu_name[i] = 0;
20570 }
20571 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20572 *input_line_pointer = saved_char;
20573 demand_empty_rest_of_line ();
20574 return;
20575 }
20576 as_bad (_("unknown cpu `%s'"), name);
20577 *input_line_pointer = saved_char;
20578 ignore_rest_of_line ();
20579 }
20580
20581
20582 /* Parse a .arch directive. */
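/* For example, ".arch armv6t2" selects the ARM_ARCH_V6T2 feature set from
   the arm_archs table above and recomputes cpu_variant accordingly.  */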
20583
20584 static void
20585 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20586 {
20587 const struct arm_arch_option_table *opt;
20588 char saved_char;
20589 char *name;
20590
20591 name = input_line_pointer;
20592 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20593 input_line_pointer++;
20594 saved_char = *input_line_pointer;
20595 *input_line_pointer = 0;
20596
20597 /* Skip the first "all" entry. */
20598 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20599 if (streq (opt->name, name))
20600 {
20601 mcpu_cpu_opt = &opt->value;
20602 selected_cpu = opt->value;
20603 	strcpy (selected_cpu_name, opt->name);
20604 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20605 *input_line_pointer = saved_char;
20606 demand_empty_rest_of_line ();
20607 return;
20608 }
20609
20610 as_bad (_("unknown architecture `%s'\n"), name);
20611 *input_line_pointer = saved_char;
20612 ignore_rest_of_line ();
20613 }
20614
20615
20616 /* Parse a .object_arch directive. */
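/* For example, ".object_arch armv4" makes aeabi_set_public_attributes
   above describe ARMv4 in the EABI attributes regardless of the
   architecture actually selected or used.  */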
20617
20618 static void
20619 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20620 {
20621 const struct arm_arch_option_table *opt;
20622 char saved_char;
20623 char *name;
20624
20625 name = input_line_pointer;
20626 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20627 input_line_pointer++;
20628 saved_char = *input_line_pointer;
20629 *input_line_pointer = 0;
20630
20631 /* Skip the first "all" entry. */
20632 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20633 if (streq (opt->name, name))
20634 {
20635 object_arch = &opt->value;
20636 *input_line_pointer = saved_char;
20637 demand_empty_rest_of_line ();
20638 return;
20639 }
20640
20641 as_bad (_("unknown architecture `%s'\n"), name);
20642 *input_line_pointer = saved_char;
20643 ignore_rest_of_line ();
20644 }
20645
20646
20647 /* Parse a .fpu directive. */
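/* For example, ".fpu softvfp+vfp" selects FPU_ARCH_VFP_V2 from the
   arm_fpus table above and folds it into cpu_variant.  */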
20648
20649 static void
20650 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20651 {
20652 const struct arm_option_cpu_value_table *opt;
20653 char saved_char;
20654 char *name;
20655
20656 name = input_line_pointer;
20657 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20658 input_line_pointer++;
20659 saved_char = *input_line_pointer;
20660 *input_line_pointer = 0;
20661
20662 for (opt = arm_fpus; opt->name != NULL; opt++)
20663 if (streq (opt->name, name))
20664 {
20665 mfpu_opt = &opt->value;
20666 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20667 *input_line_pointer = saved_char;
20668 demand_empty_rest_of_line ();
20669 return;
20670 }
20671
20672 as_bad (_("unknown floating point format `%s'\n"), name);
20673 *input_line_pointer = saved_char;
20674 ignore_rest_of_line ();
20675 }
20676
20677 /* Copy symbol information. */
20678 void
20679 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
20680 {
20681 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
20682 }