Apply fixes to allow arm WinCE toolchain to produce working executables.
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <string.h>
29 #include <limits.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33
34 /* Need TARGET_CPU. */
35 #include "config.h"
36 #include "subsegs.h"
37 #include "obstack.h"
38 #include "symbols.h"
39 #include "listing.h"
40
41 #include "opcode/arm.h"
42
43 #ifdef OBJ_ELF
44 #include "elf/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
47 #endif
48
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
51
52 #ifdef OBJ_ELF
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
55
56 /* This structure holds the unwinding state. */
57
58 static struct
59 {
60 symbolS * proc_start;
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
65 segT saved_seg;
66 subsegT saved_subseg;
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
69 int opcode_count;
70 int opcode_alloc;
71 /* The number of bytes pushed to the stack. */
72 offsetT frame_size;
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
79 offsetT fp_offset;
80 int fp_reg;
81 /* Nonzero if an unwind_setfp directive has been seen. */
82 unsigned fp_used:1;
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
85 } unwind;
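/* Illustration (not from the original source): this state is driven by the
   ARM EHABI unwinding directives a compiler emits around a function, e.g. a
   hypothetical sequence such as

       .fnstart
       .save   {r4, r5, lr}    @ records saved registers / stack usage
       .setfp  fp, sp, #8      @ records fp_reg and fp_offset
       ...
       .fnend                  @ accumulated opcodes are emitted here

   proc_start and saved_seg/saved_subseg are captured at .fnstart, directives
   such as .save/.pad/.setfp update the fields above, and the opcode buffer
   is flushed into the exception table when .fnend is seen.  */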
86
87 /* Bit N indicates that an R_ARM_NONE relocation has been output for
88 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
89 emitted only once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency = 0;
91
92 #endif /* OBJ_ELF */
93
94 enum arm_float_abi
95 {
96 ARM_FLOAT_ABI_HARD,
97 ARM_FLOAT_ABI_SOFTFP,
98 ARM_FLOAT_ABI_SOFT
99 };
100
101 /* Types of processor to assemble for. */
102 #ifndef CPU_DEFAULT
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
105 #else
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
108 #endif
109 #endif
110 #endif
111
112 #ifndef FPU_DEFAULT
113 # ifdef TE_LINUX
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
116 # ifdef OBJ_ELF
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
118 # else
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
121 # endif
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
124 # else
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
127 # endif
128 #endif /* ifndef FPU_DEFAULT */
129
130 #define streq(a, b) (strcmp (a, b) == 0)
131
132 static arm_feature_set cpu_variant;
133 static arm_feature_set arm_arch_used;
134 static arm_feature_set thumb_arch_used;
135
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26 = FALSE;
138 static int atpcs = FALSE;
139 static int support_interwork = FALSE;
140 static int uses_apcs_float = FALSE;
141 static int pic_code = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default = FPU_DEFAULT;
157 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
165
166 #ifdef CPU_DEFAULT
167 static const arm_feature_set cpu_default = CPU_DEFAULT;
168 #endif
169
170 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
172 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178 static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
194
195 static const arm_feature_set arm_arch_any = ARM_ANY;
196 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
199
200 static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202 static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204 static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208 static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
216
217 static int mfloat_abi_opt = -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name[16];
222 #ifdef OBJ_ELF
223 # ifdef EABI_DEFAULT
224 static int meabi_flags = EABI_DEFAULT;
225 # else
226 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
227 # endif
228 #endif
229
230 #ifdef OBJ_ELF
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS * GOT_symbol;
233 #endif
234
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
238 instructions. */
239 static int thumb_mode = 0;
240
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
243
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
248 there.)
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
251 machine code.
252
253 Important differences from the old Thumb mode:
254
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
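/* A minimal illustration (not part of the original comment): in the old
   divided ARM syntax one might write

       addeqs  r0, r1, #1      @ condition before the 's' suffix, '#' required

   whereas the unified syntax described above accepts

       addseq  r0, r1, 1       @ 's' before the condition, '#' optional

   and in Thumb code a conditional form such as "addeq" is only legal inside
   the scope of a preceding IT instruction.  */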
263
264 static bfd_boolean unified_syntax = FALSE;
265
266 enum neon_el_type
267 {
268 NT_invtype,
269 NT_untyped,
270 NT_integer,
271 NT_float,
272 NT_poly,
273 NT_signed,
274 NT_unsigned
275 };
276
277 struct neon_type_el
278 {
279 enum neon_el_type type;
280 unsigned size;
281 };
282
283 #define NEON_MAX_TYPE_ELS 4
284
285 struct neon_type
286 {
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
288 unsigned elems;
289 };
290
291 struct arm_it
292 {
293 const char * error;
294 unsigned long instruction;
295 int size;
296 int size_req;
297 int cond;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
301 unsigned long relax;
302 struct
303 {
304 bfd_reloc_code_real_type type;
305 expressionS exp;
306 int pc_rel;
307 } reloc;
308
309 struct
310 {
311 unsigned reg;
312 signed int imm;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
330 } operands[6];
331 };
332
333 static struct arm_it inst;
334
335 #define NUM_FLOAT_VALS 8
336
337 const char * fp_const[] =
338 {
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
340 };
341
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
344
345 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
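/* Illustration (assumption, not from the original source): fp_const lists
   the floating point constants that the FPA instruction set can encode as
   immediates, and fp_values caches their littlenum expansions so that an
   operand such as

       adfd  f0, f1, #0.5      @ FPA add with an immediate FP constant

   can be matched against the table rather than re-converted each time.  */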
346
347 #define FAIL (-1)
348 #define SUCCESS (0)
349
350 #define SUFF_S 1
351 #define SUFF_D 2
352 #define SUFF_E 3
353 #define SUFF_P 4
354
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
357
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
360
361 #define DOUBLE_LOAD_FLAG 0x00000001
362
363 struct asm_cond
364 {
365 const char * template;
366 unsigned long value;
367 };
368
369 #define COND_ALWAYS 0xE
370
371 struct asm_psr
372 {
373 const char *template;
374 unsigned long field;
375 };
376
377 struct asm_barrier_opt
378 {
379 const char *template;
380 unsigned long value;
381 };
382
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
385
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
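/* Illustration (not from the original source): these bits form the MSR
   field mask, so an instruction written as, e.g.,

       msr  CPSR_fc, r0

   has PSR_f | PSR_c set in its encoding, while SPSR_BIT above selects the
   SPSR_xxx rather than the CPSR_xxx forms.  */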
391
392 struct reloc_entry
393 {
394 char *name;
395 bfd_reloc_code_real_type reloc;
396 };
397
398 enum vfp_reg_pos
399 {
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
402 };
403
404 enum vfp_ldstm_type
405 {
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
407 };
408
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
412
413 struct neon_typed_alias
414 {
415 unsigned char defined;
416 unsigned char index;
417 struct neon_type_el eltype;
418 };
419
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
422 enum arm_reg_type
423 {
424 REG_TYPE_RN,
425 REG_TYPE_CP,
426 REG_TYPE_CN,
427 REG_TYPE_FN,
428 REG_TYPE_VFS,
429 REG_TYPE_VFD,
430 REG_TYPE_NQ,
431 REG_TYPE_NDQ,
432 REG_TYPE_VFC,
433 REG_TYPE_MVF,
434 REG_TYPE_MVD,
435 REG_TYPE_MVFX,
436 REG_TYPE_MVDX,
437 REG_TYPE_MVAX,
438 REG_TYPE_DSPSC,
439 REG_TYPE_MMXWR,
440 REG_TYPE_MMXWC,
441 REG_TYPE_MMXWCG,
442 REG_TYPE_XSCALE,
443 };
444
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
449 struct reg_entry
450 {
451 const char *name;
452 unsigned char number;
453 unsigned char type;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
456 };
457
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs[] =
460 {
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
480 };
481
482 /* Some well known registers that we refer to directly elsewhere. */
483 #define REG_SP 13
484 #define REG_LR 14
485 #define REG_PC 15
486
487 /* ARM instructions take 4 bytes in the object file, Thumb instructions
488 take 2: */
489 #define INSN_SIZE 4
490
491 struct asm_opcode
492 {
493 /* Basic string to match. */
494 const char *template;
495
496 /* Parameters to instruction. */
497 unsigned char operands[8];
498
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
501
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
504
505 /* Thumb-format instruction code. */
506 unsigned int tvalue;
507
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
511
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
514
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
517 };
518
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
528
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
532
533 #define DATA_OP_SHIFT 21
534
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
537
538 /* Codes to distinguish the arithmetic instructions. */
539 #define OPCODE_AND 0
540 #define OPCODE_EOR 1
541 #define OPCODE_SUB 2
542 #define OPCODE_RSB 3
543 #define OPCODE_ADD 4
544 #define OPCODE_ADC 5
545 #define OPCODE_SBC 6
546 #define OPCODE_RSC 7
547 #define OPCODE_TST 8
548 #define OPCODE_TEQ 9
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
555
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
566
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
572
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
584
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
592
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
598
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
614
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
617
618 #define T_OPCODE_BRANCH 0xe000
619
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
624
625 #define BAD_ARGS _("bad arguments to instruction")
626 #define BAD_PC _("r15 not allowed here")
627 #define BAD_COND _("instruction cannot be conditional")
628 #define BAD_OVERLAP _("registers may not be the same")
629 #define BAD_HIREG _("lo register required")
630 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
631 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
632 #define BAD_BRANCH _("branch must be last instruction in IT block")
633 #define BAD_NOT_IT _("instruction not allowed in IT block")
634
635 static struct hash_control *arm_ops_hsh;
636 static struct hash_control *arm_cond_hsh;
637 static struct hash_control *arm_shift_hsh;
638 static struct hash_control *arm_psr_hsh;
639 static struct hash_control *arm_v7m_psr_hsh;
640 static struct hash_control *arm_reg_hsh;
641 static struct hash_control *arm_reloc_hsh;
642 static struct hash_control *arm_barrier_opt_hsh;
643
644 /* Stuff needed to resolve the label ambiguity
645 As:
646 ...
647 label: <insn>
648 may differ from:
649 ...
650 label:
651 <insn>
652 */
653
654 symbolS * last_label_seen;
655 static int label_is_thumb_function_name = FALSE;
656 \f
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
659
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
662 {
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
665 unsigned int id;
666 symbolS * symbol;
667 segT section;
668 subsegT sub_section;
669 struct literal_pool * next;
670 } literal_pool;
671
672 /* Pointer to a linked list of literal pools. */
673 literal_pool * list_of_pools = NULL;
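/* Illustration (not from the original source): a pool is what backs the
   familiar pseudo-instruction

       ldr r0, =0x12345678     @ constant parked in the current literal pool
       ...
       .ltorg                  @ or end of section: pool contents dumped here

   Each section/sub-section pair gets its own literal_pool on this list, and
   SYMBOL marks where the pool will eventually be emitted.  */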
674
675 /* State variables for IT block handling. */
676 static int current_it_mask = 0;
677 static int current_cc;
678
679 \f
680 /* Pure syntax. */
681
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars[] = "@";
685
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars[] = "#";
694
695 const char line_separator_chars[] = ";";
696
697 /* Chars that can be used to separate the mantissa
698 from the exponent in floating point numbers. */
699 const char EXP_CHARS[] = "eE";
700
701 /* Chars that mean this number is a floating point constant. */
702 /* As in 0f12.456 */
703 /* or 0d1.2345e12 */
704
705 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
706
707 /* Prefix characters that indicate the start of an immediate
708 value. */
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
710
711 /* Separator character handling. */
712
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
714
715 static inline int
716 skip_past_char (char ** str, char c)
717 {
718 if (**str == c)
719 {
720 (*str)++;
721 return SUCCESS;
722 }
723 else
724 return FAIL;
725 }
726 #define skip_past_comma(str) skip_past_char (str, ',')
727
728 /* Arithmetic expressions (possibly involving symbols). */
729
730 /* Return TRUE if anything in the expression is a bignum. */
731
732 static int
733 walk_no_bignums (symbolS * sp)
734 {
735 if (symbol_get_value_expression (sp)->X_op == O_big)
736 return 1;
737
738 if (symbol_get_value_expression (sp)->X_add_symbol)
739 {
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
743 }
744
745 return 0;
746 }
747
748 static int in_my_get_expression = 0;
749
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
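/* Illustration (not from the original source) of the prefix modes above:
   GE_IMM_PREFIX is used where a '#' (or '$') is mandatory, as in
   "mov r0, #1"; GE_NO_PREFIX is used for plain expressions such as address
   offsets and scalar indices; the GE_OPT_PREFIX* modes make the prefix
   optional, with the _BIG variant additionally tolerating 64-bit bignums
   for Neon VMOV/VMVN immediates.  */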
757
758 static int
759 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
760 {
761 char * save_in;
762 segT seg;
763
764 /* In unified syntax, all prefixes are optional. */
765 if (unified_syntax)
766 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
767 : GE_OPT_PREFIX;
768
769 switch (prefix_mode)
770 {
771 case GE_NO_PREFIX: break;
772 case GE_IMM_PREFIX:
773 if (!is_immediate_prefix (**str))
774 {
775 inst.error = _("immediate expression requires a # prefix");
776 return FAIL;
777 }
778 (*str)++;
779 break;
780 case GE_OPT_PREFIX:
781 case GE_OPT_PREFIX_BIG:
782 if (is_immediate_prefix (**str))
783 (*str)++;
784 break;
785 default: abort ();
786 }
787
788 memset (ep, 0, sizeof (expressionS));
789
790 save_in = input_line_pointer;
791 input_line_pointer = *str;
792 in_my_get_expression = 1;
793 seg = expression (ep);
794 in_my_get_expression = 0;
795
796 if (ep->X_op == O_illegal)
797 {
798 /* We found a bad expression in md_operand(). */
799 *str = input_line_pointer;
800 input_line_pointer = save_in;
801 if (inst.error == NULL)
802 inst.error = _("bad expression");
803 return 1;
804 }
805
806 #ifdef OBJ_AOUT
807 if (seg != absolute_section
808 && seg != text_section
809 && seg != data_section
810 && seg != bss_section
811 && seg != undefined_section)
812 {
813 inst.error = _("bad segment");
814 *str = input_line_pointer;
815 input_line_pointer = save_in;
816 return 1;
817 }
818 #endif
819
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode != GE_OPT_PREFIX_BIG
824 && (ep->X_op == O_big
825 || (ep->X_add_symbol
826 && (walk_no_bignums (ep->X_add_symbol)
827 || (ep->X_op_symbol
828 && walk_no_bignums (ep->X_op_symbol))))))
829 {
830 inst.error = _("invalid constant");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
833 return 1;
834 }
835
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
838 return 0;
839 }
840
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
845
846 Note that fp constants aren't represented in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
852
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
854
855 char *
856 md_atof (int type, char * litP, int * sizeP)
857 {
858 int prec;
859 LITTLENUM_TYPE words[MAX_LITTLENUMS];
860 char *t;
861 int i;
862
863 switch (type)
864 {
865 case 'f':
866 case 'F':
867 case 's':
868 case 'S':
869 prec = 2;
870 break;
871
872 case 'd':
873 case 'D':
874 case 'r':
875 case 'R':
876 prec = 4;
877 break;
878
879 case 'x':
880 case 'X':
881 prec = 6;
882 break;
883
884 case 'p':
885 case 'P':
886 prec = 6;
887 break;
888
889 default:
890 *sizeP = 0;
891 return _("bad call to MD_ATOF()");
892 }
893
894 t = atof_ieee (input_line_pointer, type, words);
895 if (t)
896 input_line_pointer = t;
897 *sizeP = prec * 2;
898
899 if (target_big_endian)
900 {
901 for (i = 0; i < prec; i++)
902 {
903 md_number_to_chars (litP, (valueT) words[i], 2);
904 litP += 2;
905 }
906 }
907 else
908 {
909 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
910 for (i = prec - 1; i >= 0; i--)
911 {
912 md_number_to_chars (litP, (valueT) words[i], 2);
913 litP += 2;
914 }
915 else
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i = 0; i < prec; i += 2)
919 {
920 md_number_to_chars (litP, (valueT) words[i + 1], 2);
921 md_number_to_chars (litP + 2, (valueT) words[i], 2);
922 litP += 4;
923 }
924 }
925
926 return 0;
927 }
928
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
931 void
932 md_operand (expressionS * expr)
933 {
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
936 }
937
938 /* Immediate values. */
939
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
943 #ifdef OBJ_ELF
944 static int
945 immediate_for_directive (int *val)
946 {
947 expressionS exp;
948 exp.X_op = O_illegal;
949
950 if (is_immediate_prefix (*input_line_pointer))
951 {
952 input_line_pointer++;
953 expression (&exp);
954 }
955
956 if (exp.X_op != O_constant)
957 {
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
960 return FAIL;
961 }
962 *val = exp.X_add_number;
963 return SUCCESS;
964 }
965 #endif
966
967 /* Register parsing. */
968
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
973
974 static struct reg_entry *
975 arm_reg_parse_multi (char **ccp)
976 {
977 char *start = *ccp;
978 char *p;
979 struct reg_entry *reg;
980
981 #ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
983 return NULL;
984 start++;
985 #endif
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
988 start++;
989 #endif
990
991 p = start;
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
993 return NULL;
994
995 do
996 p++;
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
998
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1000
1001 if (!reg)
1002 return NULL;
1003
1004 *ccp = p;
1005 return reg;
1006 }
1007
1008 static int
1009 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1010 enum arm_reg_type type)
1011 {
1012 /* Alternative syntaxes are accepted for a few register classes. */
1013 switch (type)
1014 {
1015 case REG_TYPE_MVF:
1016 case REG_TYPE_MVD:
1017 case REG_TYPE_MVFX:
1018 case REG_TYPE_MVDX:
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg && reg->type == REG_TYPE_CN)
1021 return reg->number;
1022 break;
1023
1024 case REG_TYPE_CP:
1025 /* For backward compatibility, a bare number is valid here. */
1026 {
1027 unsigned long processor = strtoul (start, ccp, 10);
1028 if (*ccp != start && processor <= 15)
1029 return processor;
1030 }
1031
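      /* Note: if the bare-number parse above does not return, control falls
	 through to the REG_TYPE_MMXWC case below.  */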
1032 case REG_TYPE_MMXWC:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg && reg->type == REG_TYPE_MMXWCG)
1036 return reg->number;
1037 break;
1038
1039 default:
1040 break;
1041 }
1042
1043 return FAIL;
1044 }
1045
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1048
1049 static int
1050 arm_reg_parse (char **ccp, enum arm_reg_type type)
1051 {
1052 char *start = *ccp;
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1054 int ret;
1055
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1058 return FAIL;
1059
1060 if (reg && reg->type == type)
1061 return reg->number;
1062
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1064 return ret;
1065
1066 *ccp = start;
1067 return FAIL;
1068 }
1069
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1072 properly. E.g.,
1073
1074 .i32.i32.s16
1075 .s32.f32
1076 .u16
1077
1078 Can all be legally parsed by this function.
1079
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1083
1084 static int
1085 parse_neon_type (struct neon_type *type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 if (type)
1090 type->elems = 0;
1091
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1093 {
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1096
1097 if (*ptr != '.')
1098 break;
1099
1100 ptr++;
1101
1102 /* Just a size without an explicit type. */
1103 if (ISDIGIT (*ptr))
1104 goto parsesize;
1105
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1113 default:
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1115 return FAIL;
1116 }
1117
1118 ptr++;
1119
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1122 thissize = 32;
1123 else
1124 {
1125 parsesize:
1126 thissize = strtoul (ptr, &ptr, 10);
1127
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1129 && thissize != 64)
1130 {
1131 as_bad (_("bad size %d in type specifier"), thissize);
1132 return FAIL;
1133 }
1134 }
1135
1136 if (type)
1137 {
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1140 type->elems++;
1141 }
1142 }
1143
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1146 return FAIL;
1147
1148 *str = ptr;
1149
1150 return SUCCESS;
1151 }
1152
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1157
1158 static void
1159 first_error (const char *err)
1160 {
1161 if (!inst.error)
1162 inst.error = err;
1163 }
1164
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1166 static int
1167 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1168 {
1169 char *str = *ccp;
1170 struct neon_type optype;
1171
1172 if (*str == '.')
1173 {
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1175 {
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1178 else
1179 {
1180 first_error (_("only one type should be specified for operand"));
1181 return FAIL;
1182 }
1183 }
1184 else
1185 {
1186 first_error (_("vector type expected"));
1187 return FAIL;
1188 }
1189 }
1190 else
1191 return FAIL;
1192
1193 *ccp = str;
1194
1195 return SUCCESS;
1196 }
1197
1198 /* Special meanings for indices (which otherwise have a range of 0-7); these
1199 fit into a 4-bit integer. */
1200
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1203
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1208
1209 static int
1210 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1211 enum arm_reg_type *rtype,
1212 struct neon_typed_alias *typeinfo)
1213 {
1214 char *str = *ccp;
1215 struct reg_entry *reg = arm_reg_parse_multi (&str);
1216 struct neon_typed_alias atype;
1217 struct neon_type_el parsetype;
1218
1219 atype.defined = 0;
1220 atype.index = -1;
1221 atype.eltype.type = NT_invtype;
1222 atype.eltype.size = -1;
1223
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1226 if (reg == NULL)
1227 {
1228 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1229 if (altreg != FAIL)
1230 *ccp = str;
1231 if (typeinfo)
1232 *typeinfo = atype;
1233 return altreg;
1234 }
1235
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type == REG_TYPE_NDQ
1238 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1239 type = reg->type;
1240
1241 if (type != reg->type)
1242 return FAIL;
1243
1244 if (reg->neon)
1245 atype = *reg->neon;
1246
1247 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1248 {
1249 if ((atype.defined & NTA_HASTYPE) != 0)
1250 {
1251 first_error (_("can't redefine type for operand"));
1252 return FAIL;
1253 }
1254 atype.defined |= NTA_HASTYPE;
1255 atype.eltype = parsetype;
1256 }
1257
1258 if (skip_past_char (&str, '[') == SUCCESS)
1259 {
1260 if (type != REG_TYPE_VFD)
1261 {
1262 first_error (_("only D registers may be indexed"));
1263 return FAIL;
1264 }
1265
1266 if ((atype.defined & NTA_HASINDEX) != 0)
1267 {
1268 first_error (_("can't change index for operand"));
1269 return FAIL;
1270 }
1271
1272 atype.defined |= NTA_HASINDEX;
1273
1274 if (skip_past_char (&str, ']') == SUCCESS)
1275 atype.index = NEON_ALL_LANES;
1276 else
1277 {
1278 expressionS exp;
1279
1280 my_get_expression (&exp, &str, GE_NO_PREFIX);
1281
1282 if (exp.X_op != O_constant)
1283 {
1284 first_error (_("constant expression required"));
1285 return FAIL;
1286 }
1287
1288 if (skip_past_char (&str, ']') == FAIL)
1289 return FAIL;
1290
1291 atype.index = exp.X_add_number;
1292 }
1293 }
1294
1295 if (typeinfo)
1296 *typeinfo = atype;
1297
1298 if (rtype)
1299 *rtype = type;
1300
1301 *ccp = str;
1302
1303 return reg->number;
1304 }
1305
1306 /* Like arm_reg_parse, but also allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1312 */
1313
1314 static int
1315 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1317 {
1318 struct neon_typed_alias atype;
1319 char *str = *ccp;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1321
1322 if (reg == FAIL)
1323 return FAIL;
1324
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1327 {
1328 first_error (_("register operand expected, but got scalar"));
1329 return FAIL;
1330 }
1331
1332 if (vectype)
1333 *vectype = atype.eltype;
1334
1335 *ccp = str;
1336
1337 return reg;
1338 }
1339
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
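/* Illustration (not from the original source): parse_scalar below packs a
   scalar as (register * 16 + index), so an operand written as "d5[2]" comes
   back as 0x52; NEON_SCALAR_REG then recovers 5 and NEON_SCALAR_INDEX
   recovers 2.  */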
1342
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1346
1347 static int
1348 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1349 {
1350 int reg;
1351 char *str = *ccp;
1352 struct neon_typed_alias atype;
1353
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1355
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1357 return FAIL;
1358
1359 if (atype.index == NEON_ALL_LANES)
1360 {
1361 first_error (_("scalar must have an index"));
1362 return FAIL;
1363 }
1364 else if (atype.index >= 64 / elsize)
1365 {
1366 first_error (_("scalar index out of range"));
1367 return FAIL;
1368 }
1369
1370 if (type)
1371 *type = atype.eltype;
1372
1373 *ccp = str;
1374
1375 return reg * 16 + atype.index;
1376 }
1377
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1379 static long
1380 parse_reg_list (char ** strp)
1381 {
1382 char * str = * strp;
1383 long range = 0;
1384 int another_range;
1385
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1387 do
1388 {
1389 another_range = 0;
1390
1391 if (*str == '{')
1392 {
1393 int in_range = 0;
1394 int cur_reg = -1;
1395
1396 str++;
1397 do
1398 {
1399 int reg;
1400
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1402 {
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1404 return FAIL;
1405 }
1406
1407 if (in_range)
1408 {
1409 int i;
1410
1411 if (reg <= cur_reg)
1412 {
1413 first_error (_("bad range in register list"));
1414 return FAIL;
1415 }
1416
1417 for (i = cur_reg + 1; i < reg; i++)
1418 {
1419 if (range & (1 << i))
1420 as_tsktsk
1421 (_("Warning: duplicated register (r%d) in register list"),
1422 i);
1423 else
1424 range |= 1 << i;
1425 }
1426 in_range = 0;
1427 }
1428
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1431 reg);
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1434
1435 range |= 1 << reg;
1436 cur_reg = reg;
1437 }
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1440 str--;
1441
1442 if (*str++ != '}')
1443 {
1444 first_error (_("missing `}'"));
1445 return FAIL;
1446 }
1447 }
1448 else
1449 {
1450 expressionS expr;
1451
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1453 return FAIL;
1454
1455 if (expr.X_op == O_constant)
1456 {
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1459 {
1460 inst.error = _("invalid register mask");
1461 return FAIL;
1462 }
1463
1464 if ((range & expr.X_add_number) != 0)
1465 {
1466 int regno = range & expr.X_add_number;
1467
1468 regno &= -regno;
1469 regno = (1 << regno) - 1;
1470 as_tsktsk
1471 (_("Warning: duplicated register (r%d) in register list"),
1472 regno);
1473 }
1474
1475 range |= expr.X_add_number;
1476 }
1477 else
1478 {
1479 if (inst.reloc.type != 0)
1480 {
1481 inst.error = _("expression too complex");
1482 return FAIL;
1483 }
1484
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1488 }
1489 }
1490
1491 if (*str == '|' || *str == '+')
1492 {
1493 str++;
1494 another_range = 1;
1495 }
1496 }
1497 while (another_range);
1498
1499 *strp = str;
1500 return range;
1501 }
1502
1503 /* Types of registers in a list. */
1504
1505 enum reg_list_els
1506 {
1507 REGLIST_VFP_S,
1508 REGLIST_VFP_D,
1509 REGLIST_NEON_D
1510 };
1511
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1519 some cases, e.g.:
1520 vtbl.8 d3,d4,d5
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1525 bug. */
1526
1527 static int
1528 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1529 {
1530 int base_reg;
1531 int new_base;
1532 enum arm_reg_type regtype = 0;
1533 int max_regs = 0;
1534 int count = 0;
1535 int warned = 0;
1536 unsigned long mask = 0;
1537 int i;
1538
1539 if (**str != '{')
1540 {
1541 inst.error = _("expecting {");
1542 return FAIL;
1543 }
1544
1545 (*str)++;
1546
1547 switch (etype)
1548 {
1549 case REGLIST_VFP_S:
1550 regtype = REG_TYPE_VFS;
1551 max_regs = 32;
1552 break;
1553
1554 case REGLIST_VFP_D:
1555 regtype = REG_TYPE_VFD;
1556 break;
1557
1558 case REGLIST_NEON_D:
1559 regtype = REG_TYPE_NDQ;
1560 break;
1561 }
1562
1563 if (etype != REGLIST_VFP_S)
1564 {
1565 /* VFPv3 allows 32 D registers. */
1566 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1567 {
1568 max_regs = 32;
1569 if (thumb_mode)
1570 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1571 fpu_vfp_ext_v3);
1572 else
1573 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1574 fpu_vfp_ext_v3);
1575 }
1576 else
1577 max_regs = 16;
1578 }
1579
1580 base_reg = max_regs;
1581
1582 do
1583 {
1584 int setmask = 1, addregs = 1;
1585
1586 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1587
1588 if (new_base == FAIL)
1589 {
1590 first_error (_(reg_expected_msgs[regtype]));
1591 return FAIL;
1592 }
1593
1594 if (new_base >= max_regs)
1595 {
1596 first_error (_("register out of range in list"));
1597 return FAIL;
1598 }
1599
1600 /* Note: a value of 2 * n is returned for the register Q<n>. */
1601 if (regtype == REG_TYPE_NQ)
1602 {
1603 setmask = 3;
1604 addregs = 2;
1605 }
1606
1607 if (new_base < base_reg)
1608 base_reg = new_base;
1609
1610 if (mask & (setmask << new_base))
1611 {
1612 first_error (_("invalid register list"));
1613 return FAIL;
1614 }
1615
1616 if ((mask >> new_base) != 0 && ! warned)
1617 {
1618 as_tsktsk (_("register list not in ascending order"));
1619 warned = 1;
1620 }
1621
1622 mask |= setmask << new_base;
1623 count += addregs;
1624
1625 if (**str == '-') /* We have the start of a range expression */
1626 {
1627 int high_range;
1628
1629 (*str)++;
1630
1631 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1632 == FAIL)
1633 {
1634 inst.error = gettext (reg_expected_msgs[regtype]);
1635 return FAIL;
1636 }
1637
1638 if (high_range >= max_regs)
1639 {
1640 first_error (_("register out of range in list"));
1641 return FAIL;
1642 }
1643
1644 if (regtype == REG_TYPE_NQ)
1645 high_range = high_range + 1;
1646
1647 if (high_range <= new_base)
1648 {
1649 inst.error = _("register range not in ascending order");
1650 return FAIL;
1651 }
1652
1653 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1654 {
1655 if (mask & (setmask << new_base))
1656 {
1657 inst.error = _("invalid register list");
1658 return FAIL;
1659 }
1660
1661 mask |= setmask << new_base;
1662 count += addregs;
1663 }
1664 }
1665 }
1666 while (skip_past_comma (str) != FAIL);
1667
1668 (*str)++;
1669
1670 /* Sanity check -- should have raised a parse error above. */
1671 if (count == 0 || count > max_regs)
1672 abort ();
1673
1674 *pbase = base_reg;
1675
1676 /* Final test -- the registers must be consecutive. */
1677 mask >>= base_reg;
1678 for (i = 0; i < count; i++)
1679 {
1680 if ((mask & (1u << i)) == 0)
1681 {
1682 inst.error = _("non-contiguous register range");
1683 return FAIL;
1684 }
1685 }
1686
1687 return count;
1688 }
1689
1690 /* True if two alias types are the same. */
1691
1692 static int
1693 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1694 {
1695 if (!a && !b)
1696 return 1;
1697
1698 if (!a || !b)
1699 return 0;
1700
1701 if (a->defined != b->defined)
1702 return 0;
1703
1704 if ((a->defined & NTA_HASTYPE) != 0
1705 && (a->eltype.type != b->eltype.type
1706 || a->eltype.size != b->eltype.size))
1707 return 0;
1708
1709 if ((a->defined & NTA_HASINDEX) != 0
1710 && (a->index != b->index))
1711 return 0;
1712
1713 return 1;
1714 }
1715
1716 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1717 The base register is put in *PBASE.
1718 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1719 the return value.
1720 The register stride (minus one) is put in bit 4 of the return value.
1721 Bits [6:5] encode the list length (minus one).
1722 The type of the list elements is put in *ELTYPE, if non-NULL. */
1723
1724 #define NEON_LANE(X) ((X) & 0xf)
1725 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1726 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
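/* Illustration (not from the original source) of the encoding described
   above: a hypothetical list such as {d0, d2, d4, d6} with no lane index
   yields base register 0 in *PBASE and a return value of

       NEON_INTERLEAVE_LANES      (0x0e, bits [3:0])
     | (2 - 1) << 4               (register stride 2, bit 4)
     | (4 - 1) << 5               (four registers, bits [6:5])
     = 0x7e.  */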
1727
1728 static int
1729 parse_neon_el_struct_list (char **str, unsigned *pbase,
1730 struct neon_type_el *eltype)
1731 {
1732 char *ptr = *str;
1733 int base_reg = -1;
1734 int reg_incr = -1;
1735 int count = 0;
1736 int lane = -1;
1737 int leading_brace = 0;
1738 enum arm_reg_type rtype = REG_TYPE_NDQ;
1739 int addregs = 1;
1740 const char *const incr_error = "register stride must be 1 or 2";
1741 const char *const type_error = "mismatched element/structure types in list";
1742 struct neon_typed_alias firsttype;
1743
1744 if (skip_past_char (&ptr, '{') == SUCCESS)
1745 leading_brace = 1;
1746
1747 do
1748 {
1749 struct neon_typed_alias atype;
1750 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1751
1752 if (getreg == FAIL)
1753 {
1754 first_error (_(reg_expected_msgs[rtype]));
1755 return FAIL;
1756 }
1757
1758 if (base_reg == -1)
1759 {
1760 base_reg = getreg;
1761 if (rtype == REG_TYPE_NQ)
1762 {
1763 reg_incr = 1;
1764 addregs = 2;
1765 }
1766 firsttype = atype;
1767 }
1768 else if (reg_incr == -1)
1769 {
1770 reg_incr = getreg - base_reg;
1771 if (reg_incr < 1 || reg_incr > 2)
1772 {
1773 first_error (_(incr_error));
1774 return FAIL;
1775 }
1776 }
1777 else if (getreg != base_reg + reg_incr * count)
1778 {
1779 first_error (_(incr_error));
1780 return FAIL;
1781 }
1782
1783 if (!neon_alias_types_same (&atype, &firsttype))
1784 {
1785 first_error (_(type_error));
1786 return FAIL;
1787 }
1788
1789 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1790 modes. */
1791 if (ptr[0] == '-')
1792 {
1793 struct neon_typed_alias htype;
1794 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1795 if (lane == -1)
1796 lane = NEON_INTERLEAVE_LANES;
1797 else if (lane != NEON_INTERLEAVE_LANES)
1798 {
1799 first_error (_(type_error));
1800 return FAIL;
1801 }
1802 if (reg_incr == -1)
1803 reg_incr = 1;
1804 else if (reg_incr != 1)
1805 {
1806 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1807 return FAIL;
1808 }
1809 ptr++;
1810 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1811 if (hireg == FAIL)
1812 {
1813 first_error (_(reg_expected_msgs[rtype]));
1814 return FAIL;
1815 }
1816 if (!neon_alias_types_same (&htype, &firsttype))
1817 {
1818 first_error (_(type_error));
1819 return FAIL;
1820 }
1821 count += hireg + dregs - getreg;
1822 continue;
1823 }
1824
1825 /* If we're using Q registers, we can't use [] or [n] syntax. */
1826 if (rtype == REG_TYPE_NQ)
1827 {
1828 count += 2;
1829 continue;
1830 }
1831
1832 if ((atype.defined & NTA_HASINDEX) != 0)
1833 {
1834 if (lane == -1)
1835 lane = atype.index;
1836 else if (lane != atype.index)
1837 {
1838 first_error (_(type_error));
1839 return FAIL;
1840 }
1841 }
1842 else if (lane == -1)
1843 lane = NEON_INTERLEAVE_LANES;
1844 else if (lane != NEON_INTERLEAVE_LANES)
1845 {
1846 first_error (_(type_error));
1847 return FAIL;
1848 }
1849 count++;
1850 }
1851 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1852
1853 /* No lane set by [x]. We must be interleaving structures. */
1854 if (lane == -1)
1855 lane = NEON_INTERLEAVE_LANES;
1856
1857 /* Sanity check. */
1858 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1859 || (count > 1 && reg_incr == -1))
1860 {
1861 first_error (_("error parsing element/structure list"));
1862 return FAIL;
1863 }
1864
1865 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1866 {
1867 first_error (_("expected }"));
1868 return FAIL;
1869 }
1870
1871 if (reg_incr == -1)
1872 reg_incr = 1;
1873
1874 if (eltype)
1875 *eltype = firsttype.eltype;
1876
1877 *pbase = base_reg;
1878 *str = ptr;
1879
1880 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1881 }
1882
1883 /* Parse an explicit relocation suffix on an expression. This is
1884 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1885 arm_reloc_hsh contains no entries, so this function can only
1886 succeed if there is no () after the word. Returns -1 on error,
1887 BFD_RELOC_UNUSED if there wasn't any suffix. */
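/* Illustration (assumption, not from the original source): under OBJ_ELF
   this is what lets data directives carry an explicit relocation suffix,
   e.g. something like

       .word	sym(GOT)

   where the word inside the parentheses is looked up in arm_reloc_hsh.  */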
1888 static int
1889 parse_reloc (char **str)
1890 {
1891 struct reloc_entry *r;
1892 char *p, *q;
1893
1894 if (**str != '(')
1895 return BFD_RELOC_UNUSED;
1896
1897 p = *str + 1;
1898 q = p;
1899
1900 while (*q && *q != ')' && *q != ',')
1901 q++;
1902 if (*q != ')')
1903 return -1;
1904
1905 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1906 return -1;
1907
1908 *str = q + 1;
1909 return r->reloc;
1910 }
1911
1912 /* Directives: register aliases. */
1913
1914 static struct reg_entry *
1915 insert_reg_alias (char *str, int number, int type)
1916 {
1917 struct reg_entry *new;
1918 const char *name;
1919
1920 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1921 {
1922 if (new->builtin)
1923 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1924
1925 /* Only warn about a redefinition if it's not defined as the
1926 same register. */
1927 else if (new->number != number || new->type != type)
1928 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1929
1930 return 0;
1931 }
1932
1933 name = xstrdup (str);
1934 new = xmalloc (sizeof (struct reg_entry));
1935
1936 new->name = name;
1937 new->number = number;
1938 new->type = type;
1939 new->builtin = FALSE;
1940 new->neon = NULL;
1941
1942 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1943 abort ();
1944
1945 return new;
1946 }
1947
1948 static void
1949 insert_neon_reg_alias (char *str, int number, int type,
1950 struct neon_typed_alias *atype)
1951 {
1952 struct reg_entry *reg = insert_reg_alias (str, number, type);
1953
1954 if (!reg)
1955 {
1956 first_error (_("attempt to redefine typed alias"));
1957 return;
1958 }
1959
1960 if (atype)
1961 {
1962 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1963 *reg->neon = *atype;
1964 }
1965 }
1966
1967 /* Look for the .req directive. This is of the form:
1968
1969 new_register_name .req existing_register_name
1970
1971 If we find one, or if it looks sufficiently like one that we want to
1972 handle any error here, return non-zero. Otherwise return zero. */
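/* For example (illustrative, not from the original comment):

       acc .req r0
       add acc, acc, #1        @ assembles exactly as "add r0, r0, #1"  */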
1973
1974 static int
1975 create_register_alias (char * newname, char *p)
1976 {
1977 struct reg_entry *old;
1978 char *oldname, *nbuf;
1979 size_t nlen;
1980
1981 /* The input scrubber ensures that whitespace after the mnemonic is
1982 collapsed to single spaces. */
1983 oldname = p;
1984 if (strncmp (oldname, " .req ", 6) != 0)
1985 return 0;
1986
1987 oldname += 6;
1988 if (*oldname == '\0')
1989 return 0;
1990
1991 old = hash_find (arm_reg_hsh, oldname);
1992 if (!old)
1993 {
1994 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1995 return 1;
1996 }
1997
1998 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1999 the desired alias name, and p points to its end. If not, then
2000 the desired alias name is in the global original_case_string. */
2001 #ifdef TC_CASE_SENSITIVE
2002 nlen = p - newname;
2003 #else
2004 newname = original_case_string;
2005 nlen = strlen (newname);
2006 #endif
2007
2008 nbuf = alloca (nlen + 1);
2009 memcpy (nbuf, newname, nlen);
2010 nbuf[nlen] = '\0';
2011
2012 /* Create aliases under the new name as stated; an all-lowercase
2013 version of the new name; and an all-uppercase version of the new
2014 name. */
2015 insert_reg_alias (nbuf, old->number, old->type);
2016
2017 for (p = nbuf; *p; p++)
2018 *p = TOUPPER (*p);
2019
2020 if (strncmp (nbuf, newname, nlen))
2021 insert_reg_alias (nbuf, old->number, old->type);
2022
2023 for (p = nbuf; *p; p++)
2024 *p = TOLOWER (*p);
2025
2026 if (strncmp (nbuf, newname, nlen))
2027 insert_reg_alias (nbuf, old->number, old->type);
2028
2029 return 1;
2030 }
2031
2032 /* Create a Neon typed/indexed register alias using directives, e.g.:
2033 X .dn d5.s32[1]
2034 Y .qn 6.s16
2035 Z .dn d7
2036 T .dn Z[0]
2037 These typed registers can be used instead of the types specified after the
2038 Neon mnemonic, so long as all operands given have types. Types can also be
2039 specified directly, e.g.:
2040 vadd d0.s32, d1.s32, d2.s32
2041 */
2042
2043 static int
2044 create_neon_reg_alias (char *newname, char *p)
2045 {
2046 enum arm_reg_type basetype;
2047 struct reg_entry *basereg;
2048 struct reg_entry mybasereg;
2049 struct neon_type ntype;
2050 struct neon_typed_alias typeinfo;
2051 char *namebuf, *nameend;
2052 int namelen;
2053
2054 typeinfo.defined = 0;
2055 typeinfo.eltype.type = NT_invtype;
2056 typeinfo.eltype.size = -1;
2057 typeinfo.index = -1;
2058
2059 nameend = p;
2060
2061 if (strncmp (p, " .dn ", 5) == 0)
2062 basetype = REG_TYPE_VFD;
2063 else if (strncmp (p, " .qn ", 5) == 0)
2064 basetype = REG_TYPE_NQ;
2065 else
2066 return 0;
2067
2068 p += 5;
2069
2070 if (*p == '\0')
2071 return 0;
2072
2073 basereg = arm_reg_parse_multi (&p);
2074
2075 if (basereg && basereg->type != basetype)
2076 {
2077 as_bad (_("bad type for register"));
2078 return 0;
2079 }
2080
2081 if (basereg == NULL)
2082 {
2083 expressionS exp;
2084 /* Try parsing as an integer. */
2085 my_get_expression (&exp, &p, GE_NO_PREFIX);
2086 if (exp.X_op != O_constant)
2087 {
2088 as_bad (_("expression must be constant"));
2089 return 0;
2090 }
2091 basereg = &mybasereg;
2092 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2093 : exp.X_add_number;
2094 basereg->neon = 0;
2095 }
2096
2097 if (basereg->neon)
2098 typeinfo = *basereg->neon;
2099
2100 if (parse_neon_type (&ntype, &p) == SUCCESS)
2101 {
2102 /* We got a type. */
2103 if (typeinfo.defined & NTA_HASTYPE)
2104 {
2105 as_bad (_("can't redefine the type of a register alias"));
2106 return 0;
2107 }
2108
2109 typeinfo.defined |= NTA_HASTYPE;
2110 if (ntype.elems != 1)
2111 {
2112 as_bad (_("you must specify a single type only"));
2113 return 0;
2114 }
2115 typeinfo.eltype = ntype.el[0];
2116 }
2117
2118 if (skip_past_char (&p, '[') == SUCCESS)
2119 {
2120 expressionS exp;
2121 /* We got a scalar index. */
2122
2123 if (typeinfo.defined & NTA_HASINDEX)
2124 {
2125 as_bad (_("can't redefine the index of a scalar alias"));
2126 return 0;
2127 }
2128
2129 my_get_expression (&exp, &p, GE_NO_PREFIX);
2130
2131 if (exp.X_op != O_constant)
2132 {
2133 as_bad (_("scalar index must be constant"));
2134 return 0;
2135 }
2136
2137 typeinfo.defined |= NTA_HASINDEX;
2138 typeinfo.index = exp.X_add_number;
2139
2140 if (skip_past_char (&p, ']') == FAIL)
2141 {
2142 as_bad (_("expecting ]"));
2143 return 0;
2144 }
2145 }
2146
2147 namelen = nameend - newname;
2148 namebuf = alloca (namelen + 1);
2149 strncpy (namebuf, newname, namelen);
2150 namebuf[namelen] = '\0';
2151
2152 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2153 typeinfo.defined != 0 ? &typeinfo : NULL);
2154
2155 /* Insert name in all uppercase. */
2156 for (p = namebuf; *p; p++)
2157 *p = TOUPPER (*p);
2158
2159 if (strncmp (namebuf, newname, namelen))
2160 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2161 typeinfo.defined != 0 ? &typeinfo : NULL);
2162
2163 /* Insert name in all lowercase. */
2164 for (p = namebuf; *p; p++)
2165 *p = TOLOWER (*p);
2166
2167 if (strncmp (namebuf, newname, namelen))
2168 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2169 typeinfo.defined != 0 ? &typeinfo : NULL);
2170
2171 return 1;
2172 }
2173
2174 /* Should never be called, as .req goes between the alias and the
2175 register name, not at the beginning of the line. */
2176 static void
2177 s_req (int a ATTRIBUTE_UNUSED)
2178 {
2179 as_bad (_("invalid syntax for .req directive"));
2180 }
2181
2182 static void
2183 s_dn (int a ATTRIBUTE_UNUSED)
2184 {
2185 as_bad (_("invalid syntax for .dn directive"));
2186 }
2187
2188 static void
2189 s_qn (int a ATTRIBUTE_UNUSED)
2190 {
2191 as_bad (_("invalid syntax for .qn directive"));
2192 }
2193
2194 /* The .unreq directive deletes an alias which was previously defined
2195 by .req. For example:
2196
2197 my_alias .req r11
2198 .unreq my_alias */
2199
2200 static void
2201 s_unreq (int a ATTRIBUTE_UNUSED)
2202 {
2203 char * name;
2204 char saved_char;
2205
2206 name = input_line_pointer;
2207
2208 while (*input_line_pointer != 0
2209 && *input_line_pointer != ' '
2210 && *input_line_pointer != '\n')
2211 ++input_line_pointer;
2212
2213 saved_char = *input_line_pointer;
2214 *input_line_pointer = 0;
2215
2216 if (!*name)
2217 as_bad (_("invalid syntax for .unreq directive"));
2218 else
2219 {
2220 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2221
2222 if (!reg)
2223 as_bad (_("unknown register alias '%s'"), name);
2224 else if (reg->builtin)
2225 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2226 name);
2227 else
2228 {
2229 hash_delete (arm_reg_hsh, name);
2230 free ((char *) reg->name);
2231 if (reg->neon)
2232 free (reg->neon);
2233 free (reg);
2234 }
2235 }
2236
2237 *input_line_pointer = saved_char;
2238 demand_empty_rest_of_line ();
2239 }
2240
2241 /* Directives: Instruction set selection. */
2242
2243 #ifdef OBJ_ELF
2244 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2245 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2246 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2247 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2248
2249 static enum mstate mapstate = MAP_UNDEFINED;
2250
2251 static void
2252 mapping_state (enum mstate state)
2253 {
2254 symbolS * symbolP;
2255 const char * symname;
2256 int type;
2257
2258 if (mapstate == state)
2259 /* The mapping symbol has already been emitted.
2260 There is nothing else to do. */
2261 return;
2262
2263 mapstate = state;
2264
2265 switch (state)
2266 {
2267 case MAP_DATA:
2268 symname = "$d";
2269 type = BSF_NO_FLAGS;
2270 break;
2271 case MAP_ARM:
2272 symname = "$a";
2273 type = BSF_NO_FLAGS;
2274 break;
2275 case MAP_THUMB:
2276 symname = "$t";
2277 type = BSF_NO_FLAGS;
2278 break;
2279 case MAP_UNDEFINED:
2280 return;
2281 default:
2282 abort ();
2283 }
2284
2285 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2286
2287 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2288 symbol_table_insert (symbolP);
2289 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2290
2291 switch (state)
2292 {
2293 case MAP_ARM:
2294 THUMB_SET_FUNC (symbolP, 0);
2295 ARM_SET_THUMB (symbolP, 0);
2296 ARM_SET_INTERWORK (symbolP, support_interwork);
2297 break;
2298
2299 case MAP_THUMB:
2300 THUMB_SET_FUNC (symbolP, 1);
2301 ARM_SET_THUMB (symbolP, 1);
2302 ARM_SET_INTERWORK (symbolP, support_interwork);
2303 break;
2304
2305 case MAP_DATA:
2306 default:
2307 return;
2308 }
2309 }
2310 #else
2311 #define mapping_state(x) /* nothing */
2312 #endif
2313
2314 /* Find the real, Thumb encoded start of a Thumb function. */
2315
2316 static symbolS *
2317 find_real_start (symbolS * symbolP)
2318 {
2319 char * real_start;
2320 const char * name = S_GET_NAME (symbolP);
2321 symbolS * new_target;
2322
2323 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2324 #define STUB_NAME ".real_start_of"
2325
2326 if (name == NULL)
2327 abort ();
2328
2329 /* The compiler may generate BL instructions to local labels because
2330 it needs to perform a branch to a far away location. These labels
2331 do not have a corresponding ".real_start_of" label. We check
2332 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2333 the ".real_start_of" convention for nonlocal branches. */
2334 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2335 return symbolP;
2336
2337 real_start = ACONCAT ((STUB_NAME, name, NULL));
2338 new_target = symbol_find (real_start);
2339
2340 if (new_target == NULL)
2341 {
2342 as_warn (_("Failed to find real start of function: %s\n"), name);
2343 new_target = symbolP;
2344 }
2345
2346 return new_target;
2347 }
2348
2349 static void
2350 opcode_select (int width)
2351 {
2352 switch (width)
2353 {
2354 case 16:
2355 if (! thumb_mode)
2356 {
2357 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2358 as_bad (_("selected processor does not support THUMB opcodes"));
2359
2360 thumb_mode = 1;
2361 /* No need to force the alignment, since we will have been
2362 coming from ARM mode, which is word-aligned. */
2363 record_alignment (now_seg, 1);
2364 }
2365 mapping_state (MAP_THUMB);
2366 break;
2367
2368 case 32:
2369 if (thumb_mode)
2370 {
2371 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2372 as_bad (_("selected processor does not support ARM opcodes"));
2373
2374 thumb_mode = 0;
2375
2376 if (!need_pass_2)
2377 frag_align (2, 0, 0);
2378
2379 record_alignment (now_seg, 1);
2380 }
2381 mapping_state (MAP_ARM);
2382 break;
2383
2384 default:
2385 as_bad (_("invalid instruction size selected (%d)"), width);
2386 }
2387 }
2388
2389 static void
2390 s_arm (int ignore ATTRIBUTE_UNUSED)
2391 {
2392 opcode_select (32);
2393 demand_empty_rest_of_line ();
2394 }
2395
2396 static void
2397 s_thumb (int ignore ATTRIBUTE_UNUSED)
2398 {
2399 opcode_select (16);
2400 demand_empty_rest_of_line ();
2401 }
2402
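/* Implement the .code directive: ".code 16" selects Thumb and ".code 32"
   selects ARM, via opcode_select below.  (Comment added for clarity.)  */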
2403 static void
2404 s_code (int unused ATTRIBUTE_UNUSED)
2405 {
2406 int temp;
2407
2408 temp = get_absolute_expression ();
2409 switch (temp)
2410 {
2411 case 16:
2412 case 32:
2413 opcode_select (temp);
2414 break;
2415
2416 default:
2417 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2418 }
2419 }
2420
2421 static void
2422 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2423 {
2424 /* If we are not already in thumb mode go into it, EVEN if
2425 the target processor does not support thumb instructions.
2426 This is used by gcc/config/arm/lib1funcs.asm for example
2427 to compile interworking support functions even if the
2428 target processor does not support interworking. */
2429 if (! thumb_mode)
2430 {
2431 thumb_mode = 2;
2432 record_alignment (now_seg, 1);
2433 }
2434
2435 demand_empty_rest_of_line ();
2436 }
2437
2438 static void
2439 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2440 {
2441 s_thumb (0);
2442
2443 /* The following label is the name/address of the start of a Thumb function.
2444 We need to know this for the interworking support. */
2445 label_is_thumb_function_name = TRUE;
2446 }
2447
2448 /* Perform a .set directive, but also mark the alias as
2449 being a thumb function. */
2450
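/* Illustrative .thumb_set usage (assembler input added for clarity; it is
   not part of this file and the symbol names are arbitrary):

	.thumb_set	alias_name, real_name

   This behaves like ".set alias_name, real_name" but additionally marks
   alias_name as a Thumb function for interworking purposes.  */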
2451 static void
2452 s_thumb_set (int equiv)
2453 {
2454 /* XXX the following is a duplicate of the code for s_set() in read.c
2455 We cannot just call that code as we need to get at the symbol that
2456 is created. */
2457 char * name;
2458 char delim;
2459 char * end_name;
2460 symbolS * symbolP;
2461
2462 /* Especial apologies for the random logic:
2463 This just grew, and could be parsed much more simply!
2464 Dean - in haste. */
2465 name = input_line_pointer;
2466 delim = get_symbol_end ();
2467 end_name = input_line_pointer;
2468 *end_name = delim;
2469
2470 if (*input_line_pointer != ',')
2471 {
2472 *end_name = 0;
2473 as_bad (_("expected comma after name \"%s\""), name);
2474 *end_name = delim;
2475 ignore_rest_of_line ();
2476 return;
2477 }
2478
2479 input_line_pointer++;
2480 *end_name = 0;
2481
2482 if (name[0] == '.' && name[1] == '\0')
2483 {
2484 /* XXX - this should not happen to .thumb_set. */
2485 abort ();
2486 }
2487
2488 if ((symbolP = symbol_find (name)) == NULL
2489 && (symbolP = md_undefined_symbol (name)) == NULL)
2490 {
2491 #ifndef NO_LISTING
2492 /* When doing symbol listings, play games with dummy fragments living
2493 outside the normal fragment chain to record the file and line info
2494 for this symbol. */
2495 if (listing & LISTING_SYMBOLS)
2496 {
2497 extern struct list_info_struct * listing_tail;
2498 fragS * dummy_frag = xmalloc (sizeof (fragS));
2499
2500 memset (dummy_frag, 0, sizeof (fragS));
2501 dummy_frag->fr_type = rs_fill;
2502 dummy_frag->line = listing_tail;
2503 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2504 dummy_frag->fr_symbol = symbolP;
2505 }
2506 else
2507 #endif
2508 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2509
2510 #ifdef OBJ_COFF
2511 /* "set" symbols are local unless otherwise specified. */
2512 SF_SET_LOCAL (symbolP);
2513 #endif /* OBJ_COFF */
2514 } /* Make a new symbol. */
2515
2516 symbol_table_insert (symbolP);
2517
2518 * end_name = delim;
2519
2520 if (equiv
2521 && S_IS_DEFINED (symbolP)
2522 && S_GET_SEGMENT (symbolP) != reg_section)
2523 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2524
2525 pseudo_set (symbolP);
2526
2527 demand_empty_rest_of_line ();
2528
2529 /* XXX Now we come to the Thumb specific bit of code. */
2530
2531 THUMB_SET_FUNC (symbolP, 1);
2532 ARM_SET_THUMB (symbolP, 1);
2533 #if defined OBJ_ELF || defined OBJ_COFF
2534 ARM_SET_INTERWORK (symbolP, support_interwork);
2535 #endif
2536 }
2537
2538 /* Directives: Mode selection. */
2539
2540 /* .syntax [unified|divided] - choose the new unified syntax
2541 (same for Arm and Thumb encoding, modulo slight differences in what
2542 can be represented) or the old divergent syntax for each mode. */
2543 static void
2544 s_syntax (int unused ATTRIBUTE_UNUSED)
2545 {
2546 char *name, delim;
2547
2548 name = input_line_pointer;
2549 delim = get_symbol_end ();
2550
2551 if (!strcasecmp (name, "unified"))
2552 unified_syntax = TRUE;
2553 else if (!strcasecmp (name, "divided"))
2554 unified_syntax = FALSE;
2555 else
2556 {
2557 as_bad (_("unrecognized syntax mode \"%s\""), name);
2558 return;
2559 }
2560 *input_line_pointer = delim;
2561 demand_empty_rest_of_line ();
2562 }
2563
2564 /* Directives: sectioning and alignment. */
2565
2566 /* Same as s_align_ptwo but align 0 => align 2. */
2567
2568 static void
2569 s_align (int unused ATTRIBUTE_UNUSED)
2570 {
2571 int temp;
2572 long temp_fill;
2573 long max_alignment = 15;
2574
2575 temp = get_absolute_expression ();
2576 if (temp > max_alignment)
2577 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2578 else if (temp < 0)
2579 {
2580 as_bad (_("alignment negative. 0 assumed."));
2581 temp = 0;
2582 }
2583
2584 if (*input_line_pointer == ',')
2585 {
2586 input_line_pointer++;
2587 temp_fill = get_absolute_expression ();
2588 }
2589 else
2590 temp_fill = 0;
2591
2592 if (!temp)
2593 temp = 2;
2594
2595 /* Only make a frag if we HAVE to. */
2596 if (temp && !need_pass_2)
2597 frag_align (temp, (int) temp_fill, 0);
2598 demand_empty_rest_of_line ();
2599
2600 record_alignment (now_seg, temp);
2601 }
2602
2603 static void
2604 s_bss (int ignore ATTRIBUTE_UNUSED)
2605 {
2606 /* We don't support putting frags in the BSS segment; we fake it by
2607 marking in_bss, then looking at s_skip for clues. */
2608 subseg_set (bss_section, 0);
2609 demand_empty_rest_of_line ();
2610 mapping_state (MAP_DATA);
2611 }
2612
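/* Implement the .even directive: align to a two-byte boundary.
   (Comment added for clarity.)  */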
2613 static void
2614 s_even (int ignore ATTRIBUTE_UNUSED)
2615 {
2616 /* Never make frag if expect extra pass. */
2617 if (!need_pass_2)
2618 frag_align (1, 0, 0);
2619
2620 record_alignment (now_seg, 1);
2621
2622 demand_empty_rest_of_line ();
2623 }
2624
2625 /* Directives: Literal pools. */
2626
2627 static literal_pool *
2628 find_literal_pool (void)
2629 {
2630 literal_pool * pool;
2631
2632 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2633 {
2634 if (pool->section == now_seg
2635 && pool->sub_section == now_subseg)
2636 break;
2637 }
2638
2639 return pool;
2640 }
2641
2642 static literal_pool *
2643 find_or_make_literal_pool (void)
2644 {
2645 /* Next literal pool ID number. */
2646 static unsigned int latest_pool_num = 1;
2647 literal_pool * pool;
2648
2649 pool = find_literal_pool ();
2650
2651 if (pool == NULL)
2652 {
2653 /* Create a new pool. */
2654 pool = xmalloc (sizeof (* pool));
2655 if (! pool)
2656 return NULL;
2657
2658 pool->next_free_entry = 0;
2659 pool->section = now_seg;
2660 pool->sub_section = now_subseg;
2661 pool->next = list_of_pools;
2662 pool->symbol = NULL;
2663
2664 /* Add it to the list. */
2665 list_of_pools = pool;
2666 }
2667
2668 /* New pools, and emptied pools, will have a NULL symbol. */
2669 if (pool->symbol == NULL)
2670 {
2671 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2672 (valueT) 0, &zero_address_frag);
2673 pool->id = latest_pool_num ++;
2674 }
2675
2676 /* Done. */
2677 return pool;
2678 }
2679
2680 /* Add the literal in the global 'inst'
2681 structure to the relevant literal pool. */
2682
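/* Illustrative source that exercises the literal pool (assembler input
   added for clarity; it is not part of this file and the value is
   arbitrary):

	ldr	r0, =0x12345678		@ constant too wide for a mov; a pool
					@ entry and a pc-relative load are
					@ generated instead
	...
	.ltorg				@ flush the pool here, within range
					@ of the ldr above
 */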
2683 static int
2684 add_to_lit_pool (void)
2685 {
2686 literal_pool * pool;
2687 unsigned int entry;
2688
2689 pool = find_or_make_literal_pool ();
2690
2691 /* Check if this literal value is already in the pool. */
2692 for (entry = 0; entry < pool->next_free_entry; entry ++)
2693 {
2694 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2695 && (inst.reloc.exp.X_op == O_constant)
2696 && (pool->literals[entry].X_add_number
2697 == inst.reloc.exp.X_add_number)
2698 && (pool->literals[entry].X_unsigned
2699 == inst.reloc.exp.X_unsigned))
2700 break;
2701
2702 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2703 && (inst.reloc.exp.X_op == O_symbol)
2704 && (pool->literals[entry].X_add_number
2705 == inst.reloc.exp.X_add_number)
2706 && (pool->literals[entry].X_add_symbol
2707 == inst.reloc.exp.X_add_symbol)
2708 && (pool->literals[entry].X_op_symbol
2709 == inst.reloc.exp.X_op_symbol))
2710 break;
2711 }
2712
2713 /* Do we need to create a new entry? */
2714 if (entry == pool->next_free_entry)
2715 {
2716 if (entry >= MAX_LITERAL_POOL_SIZE)
2717 {
2718 inst.error = _("literal pool overflow");
2719 return FAIL;
2720 }
2721
2722 pool->literals[entry] = inst.reloc.exp;
2723 pool->next_free_entry += 1;
2724 }
2725
2726 inst.reloc.exp.X_op = O_symbol;
2727 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2728 inst.reloc.exp.X_add_symbol = pool->symbol;
2729
2730 return SUCCESS;
2731 }
2732
2733 /* Can't use symbol_new here, so have to create a symbol and then at
2734 a later date assign it a value. That's what these functions do. */
2735
2736 static void
2737 symbol_locate (symbolS * symbolP,
2738 const char * name, /* It is copied, the caller can modify. */
2739 segT segment, /* Segment identifier (SEG_<something>). */
2740 valueT valu, /* Symbol value. */
2741 fragS * frag) /* Associated fragment. */
2742 {
2743 unsigned int name_length;
2744 char * preserved_copy_of_name;
2745
2746 name_length = strlen (name) + 1; /* +1 for \0. */
2747 obstack_grow (&notes, name, name_length);
2748 preserved_copy_of_name = obstack_finish (&notes);
2749
2750 #ifdef tc_canonicalize_symbol_name
2751 preserved_copy_of_name =
2752 tc_canonicalize_symbol_name (preserved_copy_of_name);
2753 #endif
2754
2755 S_SET_NAME (symbolP, preserved_copy_of_name);
2756
2757 S_SET_SEGMENT (symbolP, segment);
2758 S_SET_VALUE (symbolP, valu);
2759 symbol_clear_list_pointers (symbolP);
2760
2761 symbol_set_frag (symbolP, frag);
2762
2763 /* Link to end of symbol chain. */
2764 {
2765 extern int symbol_table_frozen;
2766
2767 if (symbol_table_frozen)
2768 abort ();
2769 }
2770
2771 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2772
2773 obj_symbol_new_hook (symbolP);
2774
2775 #ifdef tc_symbol_new_hook
2776 tc_symbol_new_hook (symbolP);
2777 #endif
2778
2779 #ifdef DEBUG_SYMS
2780 verify_symbol_chain (symbol_rootP, symbol_lastP);
2781 #endif /* DEBUG_SYMS */
2782 }
2783
2784
2785 static void
2786 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2787 {
2788 unsigned int entry;
2789 literal_pool * pool;
2790 char sym_name[20];
2791
2792 pool = find_literal_pool ();
2793 if (pool == NULL
2794 || pool->symbol == NULL
2795 || pool->next_free_entry == 0)
2796 return;
2797
2798 mapping_state (MAP_DATA);
2799
2800 /* Align the pool for word accesses.
2801 Only make a frag if we have to. */
2802 if (!need_pass_2)
2803 frag_align (2, 0, 0);
2804
2805 record_alignment (now_seg, 2);
2806
2807 sprintf (sym_name, "$$lit_\002%x", pool->id);
2808
2809 symbol_locate (pool->symbol, sym_name, now_seg,
2810 (valueT) frag_now_fix (), frag_now);
2811 symbol_table_insert (pool->symbol);
2812
2813 ARM_SET_THUMB (pool->symbol, thumb_mode);
2814
2815 #if defined OBJ_COFF || defined OBJ_ELF
2816 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2817 #endif
2818
2819 for (entry = 0; entry < pool->next_free_entry; entry ++)
2820 /* First output the expression in the instruction to the pool. */
2821 emit_expr (&(pool->literals[entry]), 4); /* .word */
2822
2823 /* Mark the pool as empty. */
2824 pool->next_free_entry = 0;
2825 pool->symbol = NULL;
2826 }
2827
2828 #ifdef OBJ_ELF
2829 /* Forward declarations for functions below, in the MD interface
2830 section. */
2831 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2832 static valueT create_unwind_entry (int);
2833 static void start_unwind_section (const segT, int);
2834 static void add_unwind_opcode (valueT, int);
2835 static void flush_pending_unwind (void);
2836
2837 /* Directives: Data. */
2838
2839 static void
2840 s_arm_elf_cons (int nbytes)
2841 {
2842 expressionS exp;
2843
2844 #ifdef md_flush_pending_output
2845 md_flush_pending_output ();
2846 #endif
2847
2848 if (is_it_end_of_statement ())
2849 {
2850 demand_empty_rest_of_line ();
2851 return;
2852 }
2853
2854 #ifdef md_cons_align
2855 md_cons_align (nbytes);
2856 #endif
2857
2858 mapping_state (MAP_DATA);
2859 do
2860 {
2861 int reloc;
2862 char *base = input_line_pointer;
2863
2864 expression (& exp);
2865
2866 if (exp.X_op != O_symbol)
2867 emit_expr (&exp, (unsigned int) nbytes);
2868 else
2869 {
2870 char *before_reloc = input_line_pointer;
2871 reloc = parse_reloc (&input_line_pointer);
2872 if (reloc == -1)
2873 {
2874 as_bad (_("unrecognized relocation suffix"));
2875 ignore_rest_of_line ();
2876 return;
2877 }
2878 else if (reloc == BFD_RELOC_UNUSED)
2879 emit_expr (&exp, (unsigned int) nbytes);
2880 else
2881 {
2882 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2883 int size = bfd_get_reloc_size (howto);
2884
2885 if (reloc == BFD_RELOC_ARM_PLT32)
2886 {
2887 as_bad (_("(plt) is only valid on branch targets"));
2888 reloc = BFD_RELOC_UNUSED;
2889 size = 0;
2890 }
2891
2892 if (size > nbytes)
2893 as_bad (_("%s relocations do not fit in %d bytes"),
2894 howto->name, nbytes);
2895 else
2896 {
2897 /* We've parsed an expression stopping at O_symbol.
2898 But there may be more expression left now that we
2899 have parsed the relocation marker. Parse it again.
2900 XXX Surely there is a cleaner way to do this. */
2901 char *p = input_line_pointer;
2902 int offset;
2903 char *save_buf = alloca (input_line_pointer - base);
2904 memcpy (save_buf, base, input_line_pointer - base);
2905 memmove (base + (input_line_pointer - before_reloc),
2906 base, before_reloc - base);
2907
2908 input_line_pointer = base + (input_line_pointer-before_reloc);
2909 expression (&exp);
2910 memcpy (base, save_buf, p - base);
2911
2912 offset = nbytes - size;
2913 p = frag_more ((int) nbytes);
2914 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2915 size, &exp, 0, reloc);
2916 }
2917 }
2918 }
2919 }
2920 while (*input_line_pointer++ == ',');
2921
2922 /* Put terminator back into stream. */
2923 input_line_pointer --;
2924 demand_empty_rest_of_line ();
2925 }
2926
2927
2928 /* Parse a .rel31 directive. */
2929
2930 static void
2931 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2932 {
2933 expressionS exp;
2934 char *p;
2935 valueT highbit;
2936
2937 highbit = 0;
2938 if (*input_line_pointer == '1')
2939 highbit = 0x80000000;
2940 else if (*input_line_pointer != '0')
2941 as_bad (_("expected 0 or 1"));
2942
2943 input_line_pointer++;
2944 if (*input_line_pointer != ',')
2945 as_bad (_("missing comma"));
2946 input_line_pointer++;
2947
2948 #ifdef md_flush_pending_output
2949 md_flush_pending_output ();
2950 #endif
2951
2952 #ifdef md_cons_align
2953 md_cons_align (4);
2954 #endif
2955
2956 mapping_state (MAP_DATA);
2957
2958 expression (&exp);
2959
2960 p = frag_more (4);
2961 md_number_to_chars (p, highbit, 4);
2962 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2963 BFD_RELOC_ARM_PREL31);
2964
2965 demand_empty_rest_of_line ();
2966 }
2967
2968 /* Directives: AEABI stack-unwind tables. */
2969
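/* Illustrative directive sequence (assembler input added for clarity;
   it is not part of this file and the function body is elided).  The
   directive names are those registered in md_pseudo_table below:

	.fnstart
	.save	{r4, lr}	@ handled by s_arm_unwind_save
	.setfp	fp, sp, #0	@ handled by s_arm_unwind_setfp
	...
	.fnend			@ emits the index table entry
 */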
2970 /* Parse an unwind_fnstart directive. Simply records the current location. */
2971
2972 static void
2973 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2974 {
2975 demand_empty_rest_of_line ();
2976 /* Mark the start of the function. */
2977 unwind.proc_start = expr_build_dot ();
2978
2979 /* Reset the rest of the unwind info. */
2980 unwind.opcode_count = 0;
2981 unwind.table_entry = NULL;
2982 unwind.personality_routine = NULL;
2983 unwind.personality_index = -1;
2984 unwind.frame_size = 0;
2985 unwind.fp_offset = 0;
2986 unwind.fp_reg = 13;
2987 unwind.fp_used = 0;
2988 unwind.sp_restored = 0;
2989 }
2990
2991
2992 /* Parse a handlerdata directive. Creates the exception handling table entry
2993 for the function. */
2994
2995 static void
2996 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2997 {
2998 demand_empty_rest_of_line ();
2999 if (unwind.table_entry)
3000 as_bad (_("duplicate .handlerdata directive"));
3001
3002 create_unwind_entry (1);
3003 }
3004
3005 /* Parse an unwind_fnend directive. Generates the index table entry. */
3006
3007 static void
3008 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3009 {
3010 long where;
3011 char *ptr;
3012 valueT val;
3013
3014 demand_empty_rest_of_line ();
3015
3016 /* Add eh table entry. */
3017 if (unwind.table_entry == NULL)
3018 val = create_unwind_entry (0);
3019 else
3020 val = 0;
3021
3022 /* Add index table entry. This is two words. */
3023 start_unwind_section (unwind.saved_seg, 1);
3024 frag_align (2, 0, 0);
3025 record_alignment (now_seg, 2);
3026
3027 ptr = frag_more (8);
3028 where = frag_now_fix () - 8;
3029
3030 /* Self relative offset of the function start. */
3031 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3032 BFD_RELOC_ARM_PREL31);
3033
3034 /* Indicate dependency on EHABI-defined personality routines to the
3035 linker, if it hasn't been done already. */
3036 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3037 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3038 {
3039 static const char *const name[] = {
3040 "__aeabi_unwind_cpp_pr0",
3041 "__aeabi_unwind_cpp_pr1",
3042 "__aeabi_unwind_cpp_pr2"
3043 };
3044 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3045 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3046 marked_pr_dependency |= 1 << unwind.personality_index;
3047 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3048 = marked_pr_dependency;
3049 }
3050
3051 if (val)
3052 /* Inline exception table entry. */
3053 md_number_to_chars (ptr + 4, val, 4);
3054 else
3055 /* Self relative offset of the table entry. */
3056 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3057 BFD_RELOC_ARM_PREL31);
3058
3059 /* Restore the original section. */
3060 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3061 }
3062
3063
3064 /* Parse an unwind_cantunwind directive. */
3065
3066 static void
3067 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3068 {
3069 demand_empty_rest_of_line ();
3070 if (unwind.personality_routine || unwind.personality_index != -1)
3071 as_bad (_("personality routine specified for cantunwind frame"));
3072
3073 unwind.personality_index = -2;
3074 }
3075
3076
3077 /* Parse a personalityindex directive. */
3078
3079 static void
3080 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3081 {
3082 expressionS exp;
3083
3084 if (unwind.personality_routine || unwind.personality_index != -1)
3085 as_bad (_("duplicate .personalityindex directive"));
3086
3087 expression (&exp);
3088
3089 if (exp.X_op != O_constant
3090 || exp.X_add_number < 0 || exp.X_add_number > 15)
3091 {
3092 as_bad (_("bad personality routine number"));
3093 ignore_rest_of_line ();
3094 return;
3095 }
3096
3097 unwind.personality_index = exp.X_add_number;
3098
3099 demand_empty_rest_of_line ();
3100 }
3101
3102
3103 /* Parse a personality directive. */
3104
3105 static void
3106 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3107 {
3108 char *name, *p, c;
3109
3110 if (unwind.personality_routine || unwind.personality_index != -1)
3111 as_bad (_("duplicate .personality directive"));
3112
3113 name = input_line_pointer;
3114 c = get_symbol_end ();
3115 p = input_line_pointer;
3116 unwind.personality_routine = symbol_find_or_make (name);
3117 *p = c;
3118 demand_empty_rest_of_line ();
3119 }
3120
3121
3122 /* Parse a directive saving core registers. */
3123
3124 static void
3125 s_arm_unwind_save_core (void)
3126 {
3127 valueT op;
3128 long range;
3129 int n;
3130
3131 range = parse_reg_list (&input_line_pointer);
3132 if (range == FAIL)
3133 {
3134 as_bad (_("expected register list"));
3135 ignore_rest_of_line ();
3136 return;
3137 }
3138
3139 demand_empty_rest_of_line ();
3140
3141 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3142 into .unwind_save {..., sp...}. We aren't bothered about the value of
3143 ip because it is clobbered by calls. */
3144 if (unwind.sp_restored && unwind.fp_reg == 12
3145 && (range & 0x3000) == 0x1000)
3146 {
3147 unwind.opcode_count--;
3148 unwind.sp_restored = 0;
3149 range = (range | 0x2000) & ~0x1000;
3150 unwind.pending_offset = 0;
3151 }
3152
3153 /* Pop r4-r15. */
3154 if (range & 0xfff0)
3155 {
3156 /* See if we can use the short opcodes. These pop a block of up to 8
3157 registers starting with r4, plus maybe r14. */
3158 for (n = 0; n < 8; n++)
3159 {
3160 /* Break at the first non-saved register. */
3161 if ((range & (1 << (n + 4))) == 0)
3162 break;
3163 }
3164 /* See if there are any other bits set. */
3165 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3166 {
3167 /* Use the long form. */
3168 op = 0x8000 | ((range >> 4) & 0xfff);
3169 add_unwind_opcode (op, 2);
3170 }
3171 else
3172 {
3173 /* Use the short form. */
3174 if (range & 0x4000)
3175 op = 0xa8; /* Pop r14. */
3176 else
3177 op = 0xa0; /* Do not pop r14. */
3178 op |= (n - 1);
3179 add_unwind_opcode (op, 1);
3180 }
3181 }
3182
3183 /* Pop r0-r3. */
3184 if (range & 0xf)
3185 {
3186 op = 0xb100 | (range & 0xf);
3187 add_unwind_opcode (op, 2);
3188 }
3189
3190 /* Record the number of bytes pushed. */
3191 for (n = 0; n < 16; n++)
3192 {
3193 if (range & (1 << n))
3194 unwind.frame_size += 4;
3195 }
3196 }
3197
3198
3199 /* Parse a directive saving FPA registers. */
3200
3201 static void
3202 s_arm_unwind_save_fpa (int reg)
3203 {
3204 expressionS exp;
3205 int num_regs;
3206 valueT op;
3207
3208 /* Get Number of registers to transfer. */
3209 if (skip_past_comma (&input_line_pointer) != FAIL)
3210 expression (&exp);
3211 else
3212 exp.X_op = O_illegal;
3213
3214 if (exp.X_op != O_constant)
3215 {
3216 as_bad (_("expected , <constant>"));
3217 ignore_rest_of_line ();
3218 return;
3219 }
3220
3221 num_regs = exp.X_add_number;
3222
3223 if (num_regs < 1 || num_regs > 4)
3224 {
3225 as_bad (_("number of registers must be in the range [1:4]"));
3226 ignore_rest_of_line ();
3227 return;
3228 }
3229
3230 demand_empty_rest_of_line ();
3231
3232 if (reg == 4)
3233 {
3234 /* Short form. */
3235 op = 0xb4 | (num_regs - 1);
3236 add_unwind_opcode (op, 1);
3237 }
3238 else
3239 {
3240 /* Long form. */
3241 op = 0xc800 | (reg << 4) | (num_regs - 1);
3242 add_unwind_opcode (op, 2);
3243 }
3244 unwind.frame_size += num_regs * 12;
3245 }
3246
3247
3248 /* Parse a directive saving VFP registers. */
3249
3250 static void
3251 s_arm_unwind_save_vfp (void)
3252 {
3253 int count;
3254 unsigned int reg;
3255 valueT op;
3256
3257 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3258 if (count == FAIL)
3259 {
3260 as_bad (_("expected register list"));
3261 ignore_rest_of_line ();
3262 return;
3263 }
3264
3265 demand_empty_rest_of_line ();
3266
3267 if (reg == 8)
3268 {
3269 /* Short form. */
3270 op = 0xb8 | (count - 1);
3271 add_unwind_opcode (op, 1);
3272 }
3273 else
3274 {
3275 /* Long form. */
3276 op = 0xb300 | (reg << 4) | (count - 1);
3277 add_unwind_opcode (op, 2);
3278 }
3279 unwind.frame_size += count * 8 + 4;
3280 }
3281
3282
3283 /* Parse a directive saving iWMMXt data registers. */
3284
3285 static void
3286 s_arm_unwind_save_mmxwr (void)
3287 {
3288 int reg;
3289 int hi_reg;
3290 int i;
3291 unsigned mask = 0;
3292 valueT op;
3293
3294 if (*input_line_pointer == '{')
3295 input_line_pointer++;
3296
3297 do
3298 {
3299 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3300
3301 if (reg == FAIL)
3302 {
3303 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3304 goto error;
3305 }
3306
3307 if (mask >> reg)
3308 as_tsktsk (_("register list not in ascending order"));
3309 mask |= 1 << reg;
3310
3311 if (*input_line_pointer == '-')
3312 {
3313 input_line_pointer++;
3314 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3315 if (hi_reg == FAIL)
3316 {
3317 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3318 goto error;
3319 }
3320 else if (reg >= hi_reg)
3321 {
3322 as_bad (_("bad register range"));
3323 goto error;
3324 }
3325 for (; reg < hi_reg; reg++)
3326 mask |= 1 << reg;
3327 }
3328 }
3329 while (skip_past_comma (&input_line_pointer) != FAIL);
3330
3331 if (*input_line_pointer == '}')
3332 input_line_pointer++;
3333
3334 demand_empty_rest_of_line ();
3335
3336 /* Generate any deferred opcodes because we're going to be looking at
3337 the list. */
3338 flush_pending_unwind ();
3339
3340 for (i = 0; i < 16; i++)
3341 {
3342 if (mask & (1 << i))
3343 unwind.frame_size += 8;
3344 }
3345
3346 /* Attempt to combine with a previous opcode. We do this because gcc
3347 likes to output separate unwind directives for a single block of
3348 registers. */
3349 if (unwind.opcode_count > 0)
3350 {
3351 i = unwind.opcodes[unwind.opcode_count - 1];
3352 if ((i & 0xf8) == 0xc0)
3353 {
3354 i &= 7;
3355 /* Only merge if the blocks are contiguous. */
3356 if (i < 6)
3357 {
3358 if ((mask & 0xfe00) == (1 << 9))
3359 {
3360 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3361 unwind.opcode_count--;
3362 }
3363 }
3364 else if (i == 6 && unwind.opcode_count >= 2)
3365 {
3366 i = unwind.opcodes[unwind.opcode_count - 2];
3367 reg = i >> 4;
3368 i &= 0xf;
3369
3370 op = 0xffff << (reg - 1);
3371 if (reg > 0
3372 && ((mask & op) == (1u << (reg - 1))))
3373 {
3374 op = (1 << (reg + i + 1)) - 1;
3375 op &= ~((1 << reg) - 1);
3376 mask |= op;
3377 unwind.opcode_count -= 2;
3378 }
3379 }
3380 }
3381 }
3382
3383 hi_reg = 15;
3384 /* We want to generate opcodes in the order the registers have been
3385 saved, ie. descending order. */
3386 for (reg = 15; reg >= -1; reg--)
3387 {
3388 /* Save registers in blocks. */
3389 if (reg < 0
3390 || !(mask & (1 << reg)))
3391 {
3392 /* We found an unsaved reg. Generate opcodes to save the
3393 preceding block. */
3394 if (reg != hi_reg)
3395 {
3396 if (reg == 9)
3397 {
3398 /* Short form. */
3399 op = 0xc0 | (hi_reg - 10);
3400 add_unwind_opcode (op, 1);
3401 }
3402 else
3403 {
3404 /* Long form. */
3405 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3406 add_unwind_opcode (op, 2);
3407 }
3408 }
3409 hi_reg = reg - 1;
3410 }
3411 }
3412
3413 return;
3414 error:
3415 ignore_rest_of_line ();
3416 }
3417
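/* Parse a directive saving iWMMXt control (wCG) registers.
   (Comment added for clarity.)  */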
3418 static void
3419 s_arm_unwind_save_mmxwcg (void)
3420 {
3421 int reg;
3422 int hi_reg;
3423 unsigned mask = 0;
3424 valueT op;
3425
3426 if (*input_line_pointer == '{')
3427 input_line_pointer++;
3428
3429 do
3430 {
3431 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3432
3433 if (reg == FAIL)
3434 {
3435 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3436 goto error;
3437 }
3438
3439 reg -= 8;
3440 if (mask >> reg)
3441 as_tsktsk (_("register list not in ascending order"));
3442 mask |= 1 << reg;
3443
3444 if (*input_line_pointer == '-')
3445 {
3446 input_line_pointer++;
3447 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3448 if (hi_reg == FAIL)
3449 {
3450 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3451 goto error;
3452 }
3453 else if (reg >= hi_reg)
3454 {
3455 as_bad (_("bad register range"));
3456 goto error;
3457 }
3458 for (; reg < hi_reg; reg++)
3459 mask |= 1 << reg;
3460 }
3461 }
3462 while (skip_past_comma (&input_line_pointer) != FAIL);
3463
3464 if (*input_line_pointer == '}')
3465 input_line_pointer++;
3466
3467 demand_empty_rest_of_line ();
3468
3469 /* Generate any deferred opcodes because we're going to be looking at
3470 the list. */
3471 flush_pending_unwind ();
3472
3473 for (reg = 0; reg < 16; reg++)
3474 {
3475 if (mask & (1 << reg))
3476 unwind.frame_size += 4;
3477 }
3478 op = 0xc700 | mask;
3479 add_unwind_opcode (op, 2);
3480 return;
3481 error:
3482 ignore_rest_of_line ();
3483 }
3484
3485
3486 /* Parse an unwind_save directive. */
3487
3488 static void
3489 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3490 {
3491 char *peek;
3492 struct reg_entry *reg;
3493 bfd_boolean had_brace = FALSE;
3494
3495 /* Figure out what sort of save we have. */
3496 peek = input_line_pointer;
3497
3498 if (*peek == '{')
3499 {
3500 had_brace = TRUE;
3501 peek++;
3502 }
3503
3504 reg = arm_reg_parse_multi (&peek);
3505
3506 if (!reg)
3507 {
3508 as_bad (_("register expected"));
3509 ignore_rest_of_line ();
3510 return;
3511 }
3512
3513 switch (reg->type)
3514 {
3515 case REG_TYPE_FN:
3516 if (had_brace)
3517 {
3518 as_bad (_("FPA .unwind_save does not take a register list"));
3519 ignore_rest_of_line ();
3520 return;
3521 }
3522 s_arm_unwind_save_fpa (reg->number);
3523 return;
3524
3525 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3526 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3527 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3528 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3529
3530 default:
3531 as_bad (_(".unwind_save does not support this kind of register"));
3532 ignore_rest_of_line ();
3533 }
3534 }
3535
3536
3537 /* Parse an unwind_movsp directive. */
3538
3539 static void
3540 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3541 {
3542 int reg;
3543 valueT op;
3544
3545 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3546 if (reg == FAIL)
3547 {
3548 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3549 ignore_rest_of_line ();
3550 return;
3551 }
3552 demand_empty_rest_of_line ();
3553
3554 if (reg == REG_SP || reg == REG_PC)
3555 {
3556 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3557 return;
3558 }
3559
3560 if (unwind.fp_reg != REG_SP)
3561 as_bad (_("unexpected .unwind_movsp directive"));
3562
3563 /* Generate opcode to restore the value. */
3564 op = 0x90 | reg;
3565 add_unwind_opcode (op, 1);
3566
3567 /* Record the information for later. */
3568 unwind.fp_reg = reg;
3569 unwind.fp_offset = unwind.frame_size;
3570 unwind.sp_restored = 1;
3571 }
3572
3573 /* Parse an unwind_pad directive. */
3574
3575 static void
3576 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3577 {
3578 int offset;
3579
3580 if (immediate_for_directive (&offset) == FAIL)
3581 return;
3582
3583 if (offset & 3)
3584 {
3585 as_bad (_("stack increment must be multiple of 4"));
3586 ignore_rest_of_line ();
3587 return;
3588 }
3589
3590 /* Don't generate any opcodes, just record the details for later. */
3591 unwind.frame_size += offset;
3592 unwind.pending_offset += offset;
3593
3594 demand_empty_rest_of_line ();
3595 }
3596
3597 /* Parse an unwind_setfp directive. */
3598
3599 static void
3600 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3601 {
3602 int sp_reg;
3603 int fp_reg;
3604 int offset;
3605
3606 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3607 if (skip_past_comma (&input_line_pointer) == FAIL)
3608 sp_reg = FAIL;
3609 else
3610 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3611
3612 if (fp_reg == FAIL || sp_reg == FAIL)
3613 {
3614 as_bad (_("expected <reg>, <reg>"));
3615 ignore_rest_of_line ();
3616 return;
3617 }
3618
3619 /* Optional constant. */
3620 if (skip_past_comma (&input_line_pointer) != FAIL)
3621 {
3622 if (immediate_for_directive (&offset) == FAIL)
3623 return;
3624 }
3625 else
3626 offset = 0;
3627
3628 demand_empty_rest_of_line ();
3629
3630 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3631 {
3632 as_bad (_("register must be either sp or set by a previous "
3633 "unwind_movsp directive"));
3634 return;
3635 }
3636
3637 /* Don't generate any opcodes, just record the information for later. */
3638 unwind.fp_reg = fp_reg;
3639 unwind.fp_used = 1;
3640 if (sp_reg == 13)
3641 unwind.fp_offset = unwind.frame_size - offset;
3642 else
3643 unwind.fp_offset -= offset;
3644 }
3645
3646 /* Parse an unwind_raw directive. */
3647
3648 static void
3649 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3650 {
3651 expressionS exp;
3652 /* This is an arbitrary limit. */
3653 unsigned char op[16];
3654 int count;
3655
3656 expression (&exp);
3657 if (exp.X_op == O_constant
3658 && skip_past_comma (&input_line_pointer) != FAIL)
3659 {
3660 unwind.frame_size += exp.X_add_number;
3661 expression (&exp);
3662 }
3663 else
3664 exp.X_op = O_illegal;
3665
3666 if (exp.X_op != O_constant)
3667 {
3668 as_bad (_("expected <offset>, <opcode>"));
3669 ignore_rest_of_line ();
3670 return;
3671 }
3672
3673 count = 0;
3674
3675 /* Parse the opcode. */
3676 for (;;)
3677 {
3678 if (count >= 16)
3679 {
3680 as_bad (_("unwind opcode too long"));
3681 ignore_rest_of_line ();
 return;
3682 }
3683 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3684 {
3685 as_bad (_("invalid unwind opcode"));
3686 ignore_rest_of_line ();
3687 return;
3688 }
3689 op[count++] = exp.X_add_number;
3690
3691 /* Parse the next byte. */
3692 if (skip_past_comma (&input_line_pointer) == FAIL)
3693 break;
3694
3695 expression (&exp);
3696 }
3697
3698 /* Add the opcode bytes in reverse order. */
3699 while (count--)
3700 add_unwind_opcode (op[count], 1);
3701
3702 demand_empty_rest_of_line ();
3703 }
3704
3705
3706 /* Parse a .eabi_attribute directive. */
3707
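/* Illustrative .eabi_attribute usage (assembler input added for clarity;
   it is not part of this file).  Whether the value is read as an integer
   or a string follows the tag-number test coded below:

	.eabi_attribute 20, 1			@ integer-valued tag
	.eabi_attribute 5, "arm926ej-s"		@ string-valued tag (5)
 */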
3708 static void
3709 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 expressionS exp;
3712 bfd_boolean is_string;
3713 int tag;
3714 unsigned int i = 0;
3715 char *s = NULL;
3716 char saved_char;
3717
3718 expression (& exp);
3719 if (exp.X_op != O_constant)
3720 goto bad;
3721
3722 tag = exp.X_add_number;
3723 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3724 is_string = 1;
3725 else
3726 is_string = 0;
3727
3728 if (skip_past_comma (&input_line_pointer) == FAIL)
3729 goto bad;
3730 if (tag == 32 || !is_string)
3731 {
3732 expression (& exp);
3733 if (exp.X_op != O_constant)
3734 {
3735 as_bad (_("expected numeric constant"));
3736 ignore_rest_of_line ();
3737 return;
3738 }
3739 i = exp.X_add_number;
3740 }
3741 if (tag == Tag_compatibility
3742 && skip_past_comma (&input_line_pointer) == FAIL)
3743 {
3744 as_bad (_("expected comma"));
3745 ignore_rest_of_line ();
3746 return;
3747 }
3748 if (is_string)
3749 {
3750 skip_whitespace (input_line_pointer);
3751 if (*input_line_pointer != '"')
3752 goto bad_string;
3753 input_line_pointer++;
3754 s = input_line_pointer;
3755 while (*input_line_pointer && *input_line_pointer != '"')
3756 input_line_pointer++;
3757 if (*input_line_pointer != '"')
3758 goto bad_string;
3759 saved_char = *input_line_pointer;
3760 *input_line_pointer = 0;
3761 }
3762 else
3763 {
3764 s = NULL;
3765 saved_char = 0;
3766 }
3767
3768 if (tag == Tag_compatibility)
3769 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3770 else if (is_string)
3771 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3772 else
3773 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3774
3775 if (s)
3776 {
3777 *input_line_pointer = saved_char;
3778 input_line_pointer++;
3779 }
3780 demand_empty_rest_of_line ();
3781 return;
3782 bad_string:
3783 as_bad (_("bad string constant"));
3784 ignore_rest_of_line ();
3785 return;
3786 bad:
3787 as_bad (_("expected <tag> , <value>"));
3788 ignore_rest_of_line ();
3789 }
3790 #endif /* OBJ_ELF */
3791
3792 static void s_arm_arch (int);
3793 static void s_arm_cpu (int);
3794 static void s_arm_fpu (int);
3795
3796 /* This table describes all the machine specific pseudo-ops the assembler
3797 has to support. The fields are:
3798 pseudo-op name without dot
3799 function to call to execute this pseudo-op
3800 Integer arg to pass to the function. */
3801
3802 const pseudo_typeS md_pseudo_table[] =
3803 {
3804 /* Never called because '.req' does not start a line. */
3805 { "req", s_req, 0 },
3806 /* Following two are likewise never called. */
3807 { "dn", s_dn, 0 },
3808 { "qn", s_qn, 0 },
3809 { "unreq", s_unreq, 0 },
3810 { "bss", s_bss, 0 },
3811 { "align", s_align, 0 },
3812 { "arm", s_arm, 0 },
3813 { "thumb", s_thumb, 0 },
3814 { "code", s_code, 0 },
3815 { "force_thumb", s_force_thumb, 0 },
3816 { "thumb_func", s_thumb_func, 0 },
3817 { "thumb_set", s_thumb_set, 0 },
3818 { "even", s_even, 0 },
3819 { "ltorg", s_ltorg, 0 },
3820 { "pool", s_ltorg, 0 },
3821 { "syntax", s_syntax, 0 },
3822 { "cpu", s_arm_cpu, 0 },
3823 { "arch", s_arm_arch, 0 },
3824 { "fpu", s_arm_fpu, 0 },
3825 #ifdef OBJ_ELF
3826 { "word", s_arm_elf_cons, 4 },
3827 { "long", s_arm_elf_cons, 4 },
3828 { "rel31", s_arm_rel31, 0 },
3829 { "fnstart", s_arm_unwind_fnstart, 0 },
3830 { "fnend", s_arm_unwind_fnend, 0 },
3831 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3832 { "personality", s_arm_unwind_personality, 0 },
3833 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3834 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3835 { "save", s_arm_unwind_save, 0 },
3836 { "movsp", s_arm_unwind_movsp, 0 },
3837 { "pad", s_arm_unwind_pad, 0 },
3838 { "setfp", s_arm_unwind_setfp, 0 },
3839 { "unwind_raw", s_arm_unwind_raw, 0 },
3840 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3841 #else
3842 { "word", cons, 4},
3843 #endif
3844 { "extend", float_cons, 'x' },
3845 { "ldouble", float_cons, 'x' },
3846 { "packed", float_cons, 'p' },
3847 { 0, 0, 0 }
3848 };
3849 \f
3850 /* Parser functions used exclusively in instruction operands. */
3851
3852 /* Generic immediate-value read function for use in insn parsing.
3853 STR points to the beginning of the immediate (the leading #);
3854 VAL receives the value; if the value is outside [MIN, MAX]
3855 issue an error. PREFIX_OPT is true if the immediate prefix is
3856 optional. */
3857
3858 static int
3859 parse_immediate (char **str, int *val, int min, int max,
3860 bfd_boolean prefix_opt)
3861 {
3862 expressionS exp;
3863 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3864 if (exp.X_op != O_constant)
3865 {
3866 inst.error = _("constant expression required");
3867 return FAIL;
3868 }
3869
3870 if (exp.X_add_number < min || exp.X_add_number > max)
3871 {
3872 inst.error = _("immediate value out of range");
3873 return FAIL;
3874 }
3875
3876 *val = exp.X_add_number;
3877 return SUCCESS;
3878 }
3879
3880 /* Less-generic immediate-value read function with the possibility of loading a
3881 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3882 instructions. Puts the result directly in inst.operands[i]. */
3883
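/* Illustrative instruction that needs a 64-bit immediate (assembler
   input added for clarity; it is not part of this file and the value is
   arbitrary):

	vmov.i64	d0, #0xff00ff00ff00ff00
 */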
3884 static int
3885 parse_big_immediate (char **str, int i)
3886 {
3887 expressionS exp;
3888 char *ptr = *str;
3889
3890 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3891
3892 if (exp.X_op == O_constant)
3893 inst.operands[i].imm = exp.X_add_number;
3894 else if (exp.X_op == O_big
3895 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3896 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3897 {
3898 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3899 /* Bignums have their least significant bits in
3900 generic_bignum[0]. Make sure we put 32 bits in imm and
3901 32 bits in reg, in a (hopefully) portable way. */
3902 assert (parts != 0);
3903 inst.operands[i].imm = 0;
3904 for (j = 0; j < parts; j++, idx++)
3905 inst.operands[i].imm |= generic_bignum[idx]
3906 << (LITTLENUM_NUMBER_OF_BITS * j);
3907 inst.operands[i].reg = 0;
3908 for (j = 0; j < parts; j++, idx++)
3909 inst.operands[i].reg |= generic_bignum[idx]
3910 << (LITTLENUM_NUMBER_OF_BITS * j);
3911 inst.operands[i].regisimm = 1;
3912 }
3913 else
3914 return FAIL;
3915
3916 *str = ptr;
3917
3918 return SUCCESS;
3919 }
3920
3921 /* Returns the pseudo-register number of an FPA immediate constant,
3922 or FAIL if there isn't a valid constant here. */
3923
3924 static int
3925 parse_fpa_immediate (char ** str)
3926 {
3927 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3928 char * save_in;
3929 expressionS exp;
3930 int i;
3931 int j;
3932
3933 /* First try to match exact strings; this is to guarantee
3934 that some formats will work even for cross assembly. */
3935
3936 for (i = 0; fp_const[i]; i++)
3937 {
3938 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3939 {
3940 char *start = *str;
3941
3942 *str += strlen (fp_const[i]);
3943 if (is_end_of_line[(unsigned char) **str])
3944 return i + 8;
3945 *str = start;
3946 }
3947 }
3948
3949 /* Just because we didn't get a match doesn't mean that the constant
3950 isn't valid, just that it is in a format that we don't
3951 automatically recognize. Try parsing it with the standard
3952 expression routines. */
3953
3954 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3955
3956 /* Look for a raw floating point number. */
3957 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3958 && is_end_of_line[(unsigned char) *save_in])
3959 {
3960 for (i = 0; i < NUM_FLOAT_VALS; i++)
3961 {
3962 for (j = 0; j < MAX_LITTLENUMS; j++)
3963 {
3964 if (words[j] != fp_values[i][j])
3965 break;
3966 }
3967
3968 if (j == MAX_LITTLENUMS)
3969 {
3970 *str = save_in;
3971 return i + 8;
3972 }
3973 }
3974 }
3975
3976 /* Try to parse a more complex expression; this will probably fail
3977 unless the code uses a floating point prefix (e.g. "0f"). */
3978 save_in = input_line_pointer;
3979 input_line_pointer = *str;
3980 if (expression (&exp) == absolute_section
3981 && exp.X_op == O_big
3982 && exp.X_add_number < 0)
3983 {
3984 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3985 Ditto for 15. */
3986 if (gen_to_words (words, 5, (long) 15) == 0)
3987 {
3988 for (i = 0; i < NUM_FLOAT_VALS; i++)
3989 {
3990 for (j = 0; j < MAX_LITTLENUMS; j++)
3991 {
3992 if (words[j] != fp_values[i][j])
3993 break;
3994 }
3995
3996 if (j == MAX_LITTLENUMS)
3997 {
3998 *str = input_line_pointer;
3999 input_line_pointer = save_in;
4000 return i + 8;
4001 }
4002 }
4003 }
4004 }
4005
4006 *str = input_line_pointer;
4007 input_line_pointer = save_in;
4008 inst.error = _("invalid FPA immediate expression");
4009 return FAIL;
4010 }
4011
4012 /* Returns 1 if a number has "quarter-precision" float format
4013 0baBbbbbbc defgh000 00000000 00000000. */
4014
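/* Worked example (added for clarity): 1.0f is 0x3f800000, which has
   a=0, B=0, bbbbb=11111, c=1, defgh=00000 and all low bits clear, so it
   matches; similarly -2.0f (0xc0000000) matches, while most other
   single-precision values do not.  */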
4015 static int
4016 is_quarter_float (unsigned imm)
4017 {
4018 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4019 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4020 }
4021
4022 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4023 0baBbbbbbc defgh000 00000000 00000000.
4024 The minus-zero case needs special handling, since it can't be encoded in the
4025 "quarter-precision" float format, but can nonetheless be loaded as an integer
4026 constant. */
4027
4028 static unsigned
4029 parse_qfloat_immediate (char **ccp, int *immed)
4030 {
4031 char *str = *ccp;
4032 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4033
4034 skip_past_char (&str, '#');
4035
4036 if ((str = atof_ieee (str, 's', words)) != NULL)
4037 {
4038 unsigned fpword = 0;
4039 int i;
4040
4041 /* Our FP word must be 32 bits (single-precision FP). */
4042 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4043 {
4044 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4045 fpword |= words[i];
4046 }
4047
4048 if (is_quarter_float (fpword) || fpword == 0x80000000)
4049 *immed = fpword;
4050 else
4051 return FAIL;
4052
4053 *ccp = str;
4054
4055 return SUCCESS;
4056 }
4057
4058 return FAIL;
4059 }
4060
4061 /* Shift operands. */
4062 enum shift_kind
4063 {
4064 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4065 };
4066
4067 struct asm_shift_name
4068 {
4069 const char *name;
4070 enum shift_kind kind;
4071 };
4072
4073 /* Third argument to parse_shift. */
4074 enum parse_shift_mode
4075 {
4076 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4077 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4078 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4079 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4080 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4081 };
4082
4083 /* Parse a <shift> specifier on an ARM data processing instruction.
4084 This has three forms:
4085
4086 (LSL|LSR|ASL|ASR|ROR) Rs
4087 (LSL|LSR|ASL|ASR|ROR) #imm
4088 RRX
4089
4090 Note that ASL is assimilated to LSL in the instruction encoding, and
4091 RRX to ROR #0 (which cannot be written as such). */
4092
4093 static int
4094 parse_shift (char **str, int i, enum parse_shift_mode mode)
4095 {
4096 const struct asm_shift_name *shift_name;
4097 enum shift_kind shift;
4098 char *s = *str;
4099 char *p = s;
4100 int reg;
4101
4102 for (p = *str; ISALPHA (*p); p++)
4103 ;
4104
4105 if (p == *str)
4106 {
4107 inst.error = _("shift expression expected");
4108 return FAIL;
4109 }
4110
4111 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4112
4113 if (shift_name == NULL)
4114 {
4115 inst.error = _("shift expression expected");
4116 return FAIL;
4117 }
4118
4119 shift = shift_name->kind;
4120
4121 switch (mode)
4122 {
4123 case NO_SHIFT_RESTRICT:
4124 case SHIFT_IMMEDIATE: break;
4125
4126 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4127 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4128 {
4129 inst.error = _("'LSL' or 'ASR' required");
4130 return FAIL;
4131 }
4132 break;
4133
4134 case SHIFT_LSL_IMMEDIATE:
4135 if (shift != SHIFT_LSL)
4136 {
4137 inst.error = _("'LSL' required");
4138 return FAIL;
4139 }
4140 break;
4141
4142 case SHIFT_ASR_IMMEDIATE:
4143 if (shift != SHIFT_ASR)
4144 {
4145 inst.error = _("'ASR' required");
4146 return FAIL;
4147 }
4148 break;
4149
4150 default: abort ();
4151 }
4152
4153 if (shift != SHIFT_RRX)
4154 {
4155 /* Whitespace can appear here if the next thing is a bare digit. */
4156 skip_whitespace (p);
4157
4158 if (mode == NO_SHIFT_RESTRICT
4159 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4160 {
4161 inst.operands[i].imm = reg;
4162 inst.operands[i].immisreg = 1;
4163 }
4164 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4165 return FAIL;
4166 }
4167 inst.operands[i].shift_kind = shift;
4168 inst.operands[i].shifted = 1;
4169 *str = p;
4170 return SUCCESS;
4171 }
4172
4173 /* Parse a <shifter_operand> for an ARM data processing instruction:
4174
4175 #<immediate>
4176 #<immediate>, <rotate>
4177 <Rm>
4178 <Rm>, <shift>
4179
4180 where <shift> is defined by parse_shift above, and <rotate> is a
4181 multiple of 2 between 0 and 30. Validation of immediate operands
4182 is deferred to md_apply_fix. */
4183
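/* Illustrative operands accepted here, in divided ARM syntax (assembler
   input added for clarity; it is not part of this file and the register
   names/values are arbitrary):

	mov	r0, #0x3fc		@ immediate, encoded as 0xff ror 30
	add	r1, r2, r3, lsl #2	@ register with immediate shift
	mov	r4, #4, 2		@ explicit rotation: 4 ror 2 == 1
 */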
4184 static int
4185 parse_shifter_operand (char **str, int i)
4186 {
4187 int value;
4188 expressionS expr;
4189
4190 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4191 {
4192 inst.operands[i].reg = value;
4193 inst.operands[i].isreg = 1;
4194
4195 /* parse_shift will override this if appropriate */
4196 inst.reloc.exp.X_op = O_constant;
4197 inst.reloc.exp.X_add_number = 0;
4198
4199 if (skip_past_comma (str) == FAIL)
4200 return SUCCESS;
4201
4202 /* Shift operation on register. */
4203 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4204 }
4205
4206 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4207 return FAIL;
4208
4209 if (skip_past_comma (str) == SUCCESS)
4210 {
4211 /* #x, y -- i.e. explicit rotation by Y. */
4212 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4213 return FAIL;
4214
4215 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4216 {
4217 inst.error = _("constant expression expected");
4218 return FAIL;
4219 }
4220
4221 value = expr.X_add_number;
4222 if (value < 0 || value > 30 || value % 2 != 0)
4223 {
4224 inst.error = _("invalid rotation");
4225 return FAIL;
4226 }
4227 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4228 {
4229 inst.error = _("invalid constant");
4230 return FAIL;
4231 }
4232
4233 /* Convert to decoded value. md_apply_fix will put it back. */
4234 inst.reloc.exp.X_add_number
4235 = (((inst.reloc.exp.X_add_number << (32 - value))
4236 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4237 }
4238
4239 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4240 inst.reloc.pc_rel = 0;
4241 return SUCCESS;
4242 }
4243
4244 /* Parse all forms of an ARM address expression. Information is written
4245 to inst.operands[i] and/or inst.reloc.
4246
4247 Preindexed addressing (.preind=1):
4248
4249 [Rn, #offset] .reg=Rn .reloc.exp=offset
4250 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4251 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4252 .shift_kind=shift .reloc.exp=shift_imm
4253
4254 These three may have a trailing ! which causes .writeback to be set also.
4255
4256 Postindexed addressing (.postind=1, .writeback=1):
4257
4258 [Rn], #offset .reg=Rn .reloc.exp=offset
4259 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4260 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4261 .shift_kind=shift .reloc.exp=shift_imm
4262
4263 Unindexed addressing (.preind=0, .postind=0):
4264
4265 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4266
4267 Other:
4268
4269 [Rn]{!} shorthand for [Rn,#0]{!}
4270 =immediate .isreg=0 .reloc.exp=immediate
4271 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4272
4273 It is the caller's responsibility to check for addressing modes not
4274 supported by the instruction, and to set inst.reloc.type. */
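/* A worked example (illustrative only): the pre-indexed operand
   "[r3, r5, lsl #2]!" leaves
     .reg=3  .preind=1  .imm=5  .immisreg=1  .negative=0
     .shifted=1  .shift_kind=SHIFT_LSL  .reloc.exp=2  .writeback=1
   and the caller's encode routine turns those fields into bits.  */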
4275
4276 static int
4277 parse_address (char **str, int i)
4278 {
4279 char *p = *str;
4280 int reg;
4281
4282 if (skip_past_char (&p, '[') == FAIL)
4283 {
4284 if (skip_past_char (&p, '=') == FAIL)
4285 {
4286 /* bare address - translate to PC-relative offset */
4287 inst.reloc.pc_rel = 1;
4288 inst.operands[i].reg = REG_PC;
4289 inst.operands[i].isreg = 1;
4290 inst.operands[i].preind = 1;
4291 }
4292 /* else a load-constant pseudo op, no special treatment needed here */
4293
4294 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4295 return FAIL;
4296
4297 *str = p;
4298 return SUCCESS;
4299 }
4300
4301 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4302 {
4303 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4304 return FAIL;
4305 }
4306 inst.operands[i].reg = reg;
4307 inst.operands[i].isreg = 1;
4308
4309 if (skip_past_comma (&p) == SUCCESS)
4310 {
4311 inst.operands[i].preind = 1;
4312
4313 if (*p == '+') p++;
4314 else if (*p == '-') p++, inst.operands[i].negative = 1;
4315
4316 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4317 {
4318 inst.operands[i].imm = reg;
4319 inst.operands[i].immisreg = 1;
4320
4321 if (skip_past_comma (&p) == SUCCESS)
4322 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4323 return FAIL;
4324 }
4325 else if (skip_past_char (&p, ':') == SUCCESS)
4326 {
4327 /* FIXME: '@' should be used here, but it's filtered out by generic
4328 code before we get to see it here. This may be subject to
4329 change. */
4330 expressionS exp;
4331 my_get_expression (&exp, &p, GE_NO_PREFIX);
4332 if (exp.X_op != O_constant)
4333 {
4334 inst.error = _("alignment must be constant");
4335 return FAIL;
4336 }
4337 inst.operands[i].imm = exp.X_add_number << 8;
4338 inst.operands[i].immisalign = 1;
4339 /* Alignments are not pre-indexes. */
4340 inst.operands[i].preind = 0;
4341 }
4342 else
4343 {
4344 if (inst.operands[i].negative)
4345 {
4346 inst.operands[i].negative = 0;
4347 p--;
4348 }
4349 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4350 return FAIL;
4351 }
4352 }
4353
4354 if (skip_past_char (&p, ']') == FAIL)
4355 {
4356 inst.error = _("']' expected");
4357 return FAIL;
4358 }
4359
4360 if (skip_past_char (&p, '!') == SUCCESS)
4361 inst.operands[i].writeback = 1;
4362
4363 else if (skip_past_comma (&p) == SUCCESS)
4364 {
4365 if (skip_past_char (&p, '{') == SUCCESS)
4366 {
4367 /* [Rn], {expr} - unindexed, with option */
4368 if (parse_immediate (&p, &inst.operands[i].imm,
4369 0, 255, TRUE) == FAIL)
4370 return FAIL;
4371
4372 if (skip_past_char (&p, '}') == FAIL)
4373 {
4374 inst.error = _("'}' expected at end of 'option' field");
4375 return FAIL;
4376 }
4377 if (inst.operands[i].preind)
4378 {
4379 inst.error = _("cannot combine index with option");
4380 return FAIL;
4381 }
4382 *str = p;
4383 return SUCCESS;
4384 }
4385 else
4386 {
4387 inst.operands[i].postind = 1;
4388 inst.operands[i].writeback = 1;
4389
4390 if (inst.operands[i].preind)
4391 {
4392 inst.error = _("cannot combine pre- and post-indexing");
4393 return FAIL;
4394 }
4395
4396 if (*p == '+') p++;
4397 else if (*p == '-') p++, inst.operands[i].negative = 1;
4398
4399 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4400 {
4401 /* We might be using the immediate for alignment already. If we
4402 are, OR the register number into the low-order bits. */
4403 if (inst.operands[i].immisalign)
4404 inst.operands[i].imm |= reg;
4405 else
4406 inst.operands[i].imm = reg;
4407 inst.operands[i].immisreg = 1;
4408
4409 if (skip_past_comma (&p) == SUCCESS)
4410 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4411 return FAIL;
4412 }
4413 else
4414 {
4415 if (inst.operands[i].negative)
4416 {
4417 inst.operands[i].negative = 0;
4418 p--;
4419 }
4420 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4421 return FAIL;
4422 }
4423 }
4424 }
4425
4426 /* If at this point neither .preind nor .postind is set, we have a
4427 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4428 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4429 {
4430 inst.operands[i].preind = 1;
4431 inst.reloc.exp.X_op = O_constant;
4432 inst.reloc.exp.X_add_number = 0;
4433 }
4434 *str = p;
4435 return SUCCESS;
4436 }
4437
4438 /* Miscellaneous. */
4439
4440 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4441 or a bitmask suitable to be or-ed into the ARM msr instruction. */
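/* For example (illustrative): a bare "cpsr" defaults to the c and f
   fields (PSR_c | PSR_f), while "spsr_cxsf" returns SPSR_BIT or-ed
   with the field bits found for "cxsf" in arm_psr_hsh.  */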
4442 static int
4443 parse_psr (char **str)
4444 {
4445 char *p;
4446 unsigned long psr_field;
4447 const struct asm_psr *psr;
4448 char *start;
4449
4450 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4451 feature for ease of use and backwards compatibility. */
4452 p = *str;
4453 if (strncasecmp (p, "SPSR", 4) == 0)
4454 psr_field = SPSR_BIT;
4455 else if (strncasecmp (p, "CPSR", 4) == 0)
4456 psr_field = 0;
4457 else
4458 {
4459 start = p;
4460 do
4461 p++;
4462 while (ISALNUM (*p) || *p == '_');
4463
4464 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4465 if (!psr)
4466 return FAIL;
4467
4468 *str = p;
4469 return psr->field;
4470 }
4471
4472 p += 4;
4473 if (*p == '_')
4474 {
4475 /* A suffix follows. */
4476 p++;
4477 start = p;
4478
4479 do
4480 p++;
4481 while (ISALNUM (*p) || *p == '_');
4482
4483 psr = hash_find_n (arm_psr_hsh, start, p - start);
4484 if (!psr)
4485 goto error;
4486
4487 psr_field |= psr->field;
4488 }
4489 else
4490 {
4491 if (ISALNUM (*p))
4492 goto error; /* Garbage after "[CS]PSR". */
4493
4494 psr_field |= (PSR_c | PSR_f);
4495 }
4496 *str = p;
4497 return psr_field;
4498
4499 error:
4500 inst.error = _("flag for {c}psr instruction expected");
4501 return FAIL;
4502 }
4503
4504 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4505 value suitable for splatting into the AIF field of the instruction. */
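/* For example (illustrative): "ai" yields 0x6 (the A and I bits),
   "aif" yields 0x7, and an empty flag string is rejected below.  */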
4506
4507 static int
4508 parse_cps_flags (char **str)
4509 {
4510 int val = 0;
4511 int saw_a_flag = 0;
4512 char *s = *str;
4513
4514 for (;;)
4515 switch (*s++)
4516 {
4517 case '\0': case ',':
4518 goto done;
4519
4520 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4521 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4522 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4523
4524 default:
4525 inst.error = _("unrecognized CPS flag");
4526 return FAIL;
4527 }
4528
4529 done:
4530 if (saw_a_flag == 0)
4531 {
4532 inst.error = _("missing CPS flags");
4533 return FAIL;
4534 }
4535
4536 *str = s - 1;
4537 return val;
4538 }
4539
4540 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4541 returns 1 for big-endian, 0 for little-endian, FAIL for an error. */
4542 
4543 static int
4544 parse_endian_specifier (char **str)
4545 {
4546 int big_endian;
4547 char *s = *str;
4548 
4549 if (strncasecmp (s, "BE", 2) == 0)
4550 big_endian = 1;
4551 else if (strncasecmp (s, "LE", 2) == 0)
4552 big_endian = 0;
4553 else
4554 {
4555 inst.error = _("valid endian specifiers are be or le");
4556 return FAIL;
4557 }
4558 
4559 if (ISALNUM (s[2]) || s[2] == '_')
4560 {
4561 inst.error = _("valid endian specifiers are be or le");
4562 return FAIL;
4563 }
4564 
4565 *str = s + 2;
4566 return big_endian;
4567 }
4568
4569 /* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a
4570 value suitable for poking into the rotate field of an sxt or sxta
4571 instruction, or FAIL on error. */
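/* For example (illustrative): "ROR #16" returns 0x2, which callers
   drop into the two-bit rotate field of the sxt/sxta encodings; any
   other rotation amount is rejected below.  */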
4572
4573 static int
4574 parse_ror (char **str)
4575 {
4576 int rot;
4577 char *s = *str;
4578
4579 if (strncasecmp (s, "ROR", 3) == 0)
4580 s += 3;
4581 else
4582 {
4583 inst.error = _("missing rotation field after comma");
4584 return FAIL;
4585 }
4586
4587 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4588 return FAIL;
4589
4590 switch (rot)
4591 {
4592 case 0: *str = s; return 0x0;
4593 case 8: *str = s; return 0x1;
4594 case 16: *str = s; return 0x2;
4595 case 24: *str = s; return 0x3;
4596
4597 default:
4598 inst.error = _("rotation can only be 0, 8, 16, or 24");
4599 return FAIL;
4600 }
4601 }
4602
4603 /* Parse a conditional code (from conds[] below). The value returned is in the
4604 range 0 .. 14, or FAIL. */
4605 static int
4606 parse_cond (char **str)
4607 {
4608 char *p, *q;
4609 const struct asm_cond *c;
4610
4611 p = q = *str;
4612 while (ISALPHA (*q))
4613 q++;
4614
4615 c = hash_find_n (arm_cond_hsh, p, q - p);
4616 if (!c)
4617 {
4618 inst.error = _("condition required");
4619 return FAIL;
4620 }
4621
4622 *str = q;
4623 return c->value;
4624 }
4625
4626 /* Parse an option for a barrier instruction. Returns the encoding for the
4627 option, or FAIL. */
4628 static int
4629 parse_barrier (char **str)
4630 {
4631 char *p, *q;
4632 const struct asm_barrier_opt *o;
4633
4634 p = q = *str;
4635 while (ISALPHA (*q))
4636 q++;
4637
4638 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4639 if (!o)
4640 return FAIL;
4641
4642 *str = q;
4643 return o->value;
4644 }
4645
4646 /* Parse the operands of a table branch instruction. Similar to a memory
4647 operand. */
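/* Accepts operands such as "[r0, r1]" (TBB) or "[r0, r1, lsl #1]"
   (TBH); these examples are illustrative.  The shift, when present,
   must be LSL #1.  */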
4648 static int
4649 parse_tb (char **str)
4650 {
4651 char * p = *str;
4652 int reg;
4653
4654 if (skip_past_char (&p, '[') == FAIL)
4655 {
4656 inst.error = _("'[' expected");
4657 return FAIL;
4658 }
4659
4660 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4661 {
4662 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4663 return FAIL;
4664 }
4665 inst.operands[0].reg = reg;
4666
4667 if (skip_past_comma (&p) == FAIL)
4668 {
4669 inst.error = _("',' expected");
4670 return FAIL;
4671 }
4672
4673 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4674 {
4675 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4676 return FAIL;
4677 }
4678 inst.operands[0].imm = reg;
4679
4680 if (skip_past_comma (&p) == SUCCESS)
4681 {
4682 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4683 return FAIL;
4684 if (inst.reloc.exp.X_add_number != 1)
4685 {
4686 inst.error = _("invalid shift");
4687 return FAIL;
4688 }
4689 inst.operands[0].shifted = 1;
4690 }
4691
4692 if (skip_past_char (&p, ']') == FAIL)
4693 {
4694 inst.error = _("']' expected");
4695 return FAIL;
4696 }
4697 *str = p;
4698 return SUCCESS;
4699 }
4700
4701 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4702 information on the types the operands can take and how they are encoded.
4703 Note particularly the abuse of ".regisimm" to signify a Neon register.
4704 Up to three operands may be read; this function handles setting the
4705 ".present" field for each operand itself.
4706 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4707 else returns FAIL. */
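/* Illustrative examples of the forms handled below (see do_neon_mov
   for the authoritative list): "d0[1], r2" is case 4 (core register
   to scalar), "d0, r2, r3" is case 5 (two core registers to a
   doubleword register), and "q0, q1" is case 0.  */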
4708
4709 static int
4710 parse_neon_mov (char **str, int *which_operand)
4711 {
4712 int i = *which_operand, val;
4713 enum arm_reg_type rtype;
4714 char *ptr = *str;
4715 struct neon_type_el optype;
4716
4717 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4718 {
4719 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4720 inst.operands[i].reg = val;
4721 inst.operands[i].isscalar = 1;
4722 inst.operands[i].vectype = optype;
4723 inst.operands[i++].present = 1;
4724
4725 if (skip_past_comma (&ptr) == FAIL)
4726 goto wanted_comma;
4727
4728 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4729 goto wanted_arm;
4730
4731 inst.operands[i].reg = val;
4732 inst.operands[i].isreg = 1;
4733 inst.operands[i].present = 1;
4734 }
4735 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4736 != FAIL)
4737 {
4738 /* Cases 0, 1, 2, 3, 5 (D only). */
4739 if (skip_past_comma (&ptr) == FAIL)
4740 goto wanted_comma;
4741
4742 inst.operands[i].reg = val;
4743 inst.operands[i].isreg = 1;
4744 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4745 inst.operands[i].vectype = optype;
4746 inst.operands[i++].present = 1;
4747
4748 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4749 {
4750 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4751 inst.operands[i-1].regisimm = 1;
4752 inst.operands[i].reg = val;
4753 inst.operands[i].isreg = 1;
4754 inst.operands[i++].present = 1;
4755
4756 if (rtype == REG_TYPE_NQ)
4757 {
4758 first_error (_("can't use Neon quad register here"));
4759 return FAIL;
4760 }
4761 if (skip_past_comma (&ptr) == FAIL)
4762 goto wanted_comma;
4763 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4764 goto wanted_arm;
4765 inst.operands[i].reg = val;
4766 inst.operands[i].isreg = 1;
4767 inst.operands[i].present = 1;
4768 }
4769 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4770 {
4771 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4772 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4773 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4774 goto bad_cond;
4775 }
4776 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4777 {
4778 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4779 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4780 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4781 goto bad_cond;
4782 }
4783 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4784 != FAIL)
4785 {
4786 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4787 Case 1: VMOV<c><q> <Dd>, <Dm> */
4788 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4789 goto bad_cond;
4790
4791 inst.operands[i].reg = val;
4792 inst.operands[i].isreg = 1;
4793 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4794 inst.operands[i].vectype = optype;
4795 inst.operands[i].present = 1;
4796 }
4797 else
4798 {
4799 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4800 return FAIL;
4801 }
4802 }
4803 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4804 {
4805 /* Cases 6, 7. */
4806 inst.operands[i].reg = val;
4807 inst.operands[i].isreg = 1;
4808 inst.operands[i++].present = 1;
4809
4810 if (skip_past_comma (&ptr) == FAIL)
4811 goto wanted_comma;
4812
4813 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4814 {
4815 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4816 inst.operands[i].reg = val;
4817 inst.operands[i].isscalar = 1;
4818 inst.operands[i].present = 1;
4819 inst.operands[i].vectype = optype;
4820 }
4821 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4822 {
4823 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4824 inst.operands[i].reg = val;
4825 inst.operands[i].isreg = 1;
4826 inst.operands[i++].present = 1;
4827
4828 if (skip_past_comma (&ptr) == FAIL)
4829 goto wanted_comma;
4830
4831 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4832 == FAIL)
4833 {
4834 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4835 return FAIL;
4836 }
4837
4838 inst.operands[i].reg = val;
4839 inst.operands[i].isreg = 1;
4840 inst.operands[i].regisimm = 1;
4841 inst.operands[i].vectype = optype;
4842 inst.operands[i].present = 1;
4843 }
4844 }
4845 else
4846 {
4847 first_error (_("parse error"));
4848 return FAIL;
4849 }
4850
4851 /* Successfully parsed the operands. Update args. */
4852 *which_operand = i;
4853 *str = ptr;
4854 return SUCCESS;
4855
4856 wanted_comma:
4857 first_error (_("expected comma"));
4858 return FAIL;
4859
4860 wanted_arm:
4861 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4862 return FAIL;
4863
4864 bad_cond:
4865 first_error (_("instruction cannot be conditionalized"));
4866 return FAIL;
4867 }
4868
4869 /* Matcher codes for parse_operands. */
4870 enum operand_parse_code
4871 {
4872 OP_stop, /* end of line */
4873
4874 OP_RR, /* ARM register */
4875 OP_RRnpc, /* ARM register, not r15 */
4876 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4877 OP_RRw, /* ARM register, not r15, optional trailing ! */
4878 OP_RCP, /* Coprocessor number */
4879 OP_RCN, /* Coprocessor register */
4880 OP_RF, /* FPA register */
4881 OP_RVS, /* VFP single precision register */
4882 OP_RVD, /* VFP double precision register (0..15) */
4883 OP_RND, /* Neon double precision register (0..31) */
4884 OP_RNQ, /* Neon quad precision register */
4885 OP_RNDQ, /* Neon double or quad precision register */
4886 OP_RNSC, /* Neon scalar D[X] */
4887 OP_RVC, /* VFP control register */
4888 OP_RMF, /* Maverick F register */
4889 OP_RMD, /* Maverick D register */
4890 OP_RMFX, /* Maverick FX register */
4891 OP_RMDX, /* Maverick DX register */
4892 OP_RMAX, /* Maverick AX register */
4893 OP_RMDS, /* Maverick DSPSC register */
4894 OP_RIWR, /* iWMMXt wR register */
4895 OP_RIWC, /* iWMMXt wC register */
4896 OP_RIWG, /* iWMMXt wCG register */
4897 OP_RXA, /* XScale accumulator register */
4898
4899 OP_REGLST, /* ARM register list */
4900 OP_VRSLST, /* VFP single-precision register list */
4901 OP_VRDLST, /* VFP double-precision register list */
4902 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4903 OP_NSTRLST, /* Neon element/structure list */
4904
4905 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4906 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4907 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4908 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4909 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4910 OP_VMOV, /* Neon VMOV operands. */
4911 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4912 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4913
4914 OP_I0, /* immediate zero */
4915 OP_I7, /* immediate value 0 .. 7 */
4916 OP_I15, /* 0 .. 15 */
4917 OP_I16, /* 1 .. 16 */
4918 OP_I16z, /* 0 .. 16 */
4919 OP_I31, /* 0 .. 31 */
4920 OP_I31w, /* 0 .. 31, optional trailing ! */
4921 OP_I32, /* 1 .. 32 */
4922 OP_I32z, /* 0 .. 32 */
4923 OP_I63, /* 0 .. 63 */
4924 OP_I63s, /* -64 .. 63 */
4925 OP_I64, /* 1 .. 64 */
4926 OP_I64z, /* 0 .. 64 */
4927 OP_I255, /* 0 .. 255 */
4928 OP_Iffff, /* 0 .. 65535 */
4929
4930 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4931 OP_I7b, /* 0 .. 7 */
4932 OP_I15b, /* 0 .. 15 */
4933 OP_I31b, /* 0 .. 31 */
4934
4935 OP_SH, /* shifter operand */
4936 OP_ADDR, /* Memory address expression (any mode) */
4937 OP_EXP, /* arbitrary expression */
4938 OP_EXPi, /* same, with optional immediate prefix */
4939 OP_EXPr, /* same, with optional relocation suffix */
4940
4941 OP_CPSF, /* CPS flags */
4942 OP_ENDI, /* Endianness specifier */
4943 OP_PSR, /* CPSR/SPSR mask for msr */
4944 OP_COND, /* conditional code */
4945 OP_TB, /* Table branch. */
4946
4947 OP_RRnpc_I0, /* ARM register or literal 0 */
4948 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4949 OP_RR_EXi, /* ARM register or expression with imm prefix */
4950 OP_RF_IF, /* FPA register or immediate */
4951 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4952
4953 /* Optional operands. */
4954 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4955 OP_oI31b, /* 0 .. 31 */
4956 OP_oI32b, /* 1 .. 32 */
4957 OP_oIffffb, /* 0 .. 65535 */
4958 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4959
4960 OP_oRR, /* ARM register */
4961 OP_oRRnpc, /* ARM register, not the PC */
4962 OP_oRND, /* Optional Neon double precision register */
4963 OP_oRNQ, /* Optional Neon quad precision register */
4964 OP_oRNDQ, /* Optional Neon double or quad precision register */
4965 OP_oSHll, /* LSL immediate */
4966 OP_oSHar, /* ASR immediate */
4967 OP_oSHllar, /* LSL or ASR immediate */
4968 OP_oROR, /* ROR 0/8/16/24 */
4969 OP_oBARRIER, /* Option argument for a barrier instruction. */
4970
4971 OP_FIRST_OPTIONAL = OP_oI7b
4972 };
4973
4974 /* Generic instruction operand parser. This does no encoding and no
4975 semantic validation; it merely squirrels values away in the inst
4976 structure. Returns SUCCESS or FAIL depending on whether the
4977 specified grammar matched. */
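/* A hypothetical illustration (the real patterns are supplied by the
   opcode table elsewhere in this file): matching "r0, r1, r2, lsl #3"
   against { OP_RRnpc, OP_oRRnpc, OP_SH, OP_stop } fills three
   operands, with OP_SH absorbing "r2, lsl #3"; for "r0, #42" the
   optional register is backtracked over and OP_SH takes the
   immediate instead.  */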
4978 static int
4979 parse_operands (char *str, const unsigned char *pattern)
4980 {
4981 unsigned const char *upat = pattern;
4982 char *backtrack_pos = 0;
4983 const char *backtrack_error = 0;
4984 int i, val, backtrack_index = 0;
4985 enum arm_reg_type rtype;
4986
4987 #define po_char_or_fail(chr) do { \
4988 if (skip_past_char (&str, chr) == FAIL) \
4989 goto bad_args; \
4990 } while (0)
4991
4992 #define po_reg_or_fail(regtype) do { \
4993 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4994 &inst.operands[i].vectype); \
4995 if (val == FAIL) \
4996 { \
4997 first_error (_(reg_expected_msgs[regtype])); \
4998 goto failure; \
4999 } \
5000 inst.operands[i].reg = val; \
5001 inst.operands[i].isreg = 1; \
5002 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5003 } while (0)
5004
5005 #define po_reg_or_goto(regtype, label) do { \
5006 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5007 &inst.operands[i].vectype); \
5008 if (val == FAIL) \
5009 goto label; \
5010 \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isreg = 1; \
5013 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5014 } while (0)
5015
5016 #define po_imm_or_fail(min, max, popt) do { \
5017 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5018 goto failure; \
5019 inst.operands[i].imm = val; \
5020 } while (0)
5021
5022 #define po_scalar_or_goto(elsz, label) do { \
5023 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5024 if (val == FAIL) \
5025 goto label; \
5026 inst.operands[i].reg = val; \
5027 inst.operands[i].isscalar = 1; \
5028 } while (0)
5029
5030 #define po_misc_or_fail(expr) do { \
5031 if (expr) \
5032 goto failure; \
5033 } while (0)
5034
5035 skip_whitespace (str);
5036
5037 for (i = 0; upat[i] != OP_stop; i++)
5038 {
5039 if (upat[i] >= OP_FIRST_OPTIONAL)
5040 {
5041 /* Remember where we are in case we need to backtrack. */
5042 assert (!backtrack_pos);
5043 backtrack_pos = str;
5044 backtrack_error = inst.error;
5045 backtrack_index = i;
5046 }
5047
5048 if (i > 0)
5049 po_char_or_fail (',');
5050
5051 switch (upat[i])
5052 {
5053 /* Registers */
5054 case OP_oRRnpc:
5055 case OP_RRnpc:
5056 case OP_oRR:
5057 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5058 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5059 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5060 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5061 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5062 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5063 case OP_oRND:
5064 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5065 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5066 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5067 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5068 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5069 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5070 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5071 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5072 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5073 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5074 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5075 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5076 case OP_oRNQ:
5077 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5078 case OP_oRNDQ:
5079 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5080
5081 /* Neon scalar. Using an element size of 8 means that some invalid
5082 scalars are accepted here, so deal with those in later code. */
5083 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5084
5085 /* WARNING: We can expand to two operands here. This has the potential
5086 to totally confuse the backtracking mechanism! It will be OK at
5087 least as long as we don't try to use optional args as well,
5088 though. */
5089 case OP_NILO:
5090 {
5091 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5092 i++;
5093 skip_past_comma (&str);
5094 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5095 break;
5096 one_reg_only:
5097 /* Optional register operand was omitted. Unfortunately, it's in
5098 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5099 here (this is a bit grotty). */
5100 inst.operands[i] = inst.operands[i-1];
5101 inst.operands[i-1].present = 0;
5102 break;
5103 try_imm:
5104 /* Immediate gets verified properly later, so accept any now. */
5105 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5106 }
5107 break;
5108
5109 case OP_RNDQ_I0:
5110 {
5111 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5112 break;
5113 try_imm0:
5114 po_imm_or_fail (0, 0, TRUE);
5115 }
5116 break;
5117
5118 case OP_RR_RNSC:
5119 {
5120 po_scalar_or_goto (8, try_rr);
5121 break;
5122 try_rr:
5123 po_reg_or_fail (REG_TYPE_RN);
5124 }
5125 break;
5126
5127 case OP_RNDQ_RNSC:
5128 {
5129 po_scalar_or_goto (8, try_ndq);
5130 break;
5131 try_ndq:
5132 po_reg_or_fail (REG_TYPE_NDQ);
5133 }
5134 break;
5135
5136 case OP_RND_RNSC:
5137 {
5138 po_scalar_or_goto (8, try_vfd);
5139 break;
5140 try_vfd:
5141 po_reg_or_fail (REG_TYPE_VFD);
5142 }
5143 break;
5144
5145 case OP_VMOV:
5146 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5147 not careful then bad things might happen. */
5148 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5149 break;
5150
5151 case OP_RNDQ_IMVNb:
5152 {
5153 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5154 break;
5155 try_mvnimm:
5156 /* There's a possibility of getting a 64-bit immediate here, so
5157 we need special handling. */
5158 if (parse_big_immediate (&str, i) == FAIL)
5159 {
5160 inst.error = _("immediate value is out of range");
5161 goto failure;
5162 }
5163 }
5164 break;
5165
5166 case OP_RNDQ_I63b:
5167 {
5168 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5169 break;
5170 try_shimm:
5171 po_imm_or_fail (0, 63, TRUE);
5172 }
5173 break;
5174
5175 case OP_RRnpcb:
5176 po_char_or_fail ('[');
5177 po_reg_or_fail (REG_TYPE_RN);
5178 po_char_or_fail (']');
5179 break;
5180
5181 case OP_RRw:
5182 po_reg_or_fail (REG_TYPE_RN);
5183 if (skip_past_char (&str, '!') == SUCCESS)
5184 inst.operands[i].writeback = 1;
5185 break;
5186
5187 /* Immediates */
5188 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5189 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5190 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5191 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5192 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5193 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5194 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5195 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5196 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5197 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5198 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5199 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5200 case OP_Iffff: po_imm_or_fail ( 0, 0xffff, FALSE); break;
5201
5202 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5203 case OP_oI7b:
5204 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5205 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5206 case OP_oI31b:
5207 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5208 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5209 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5210
5211 /* Immediate variants */
5212 case OP_oI255c:
5213 po_char_or_fail ('{');
5214 po_imm_or_fail (0, 255, TRUE);
5215 po_char_or_fail ('}');
5216 break;
5217
5218 case OP_I31w:
5219 /* The expression parser chokes on a trailing !, so we have
5220 to find it first and zap it. */
5221 {
5222 char *s = str;
5223 while (*s && *s != ',')
5224 s++;
5225 if (s[-1] == '!')
5226 {
5227 s[-1] = '\0';
5228 inst.operands[i].writeback = 1;
5229 }
5230 po_imm_or_fail (0, 31, TRUE);
5231 if (str == s - 1)
5232 str = s;
5233 }
5234 break;
5235
5236 /* Expressions */
5237 case OP_EXPi: EXPi:
5238 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5239 GE_OPT_PREFIX));
5240 break;
5241
5242 case OP_EXP:
5243 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5244 GE_NO_PREFIX));
5245 break;
5246
5247 case OP_EXPr: EXPr:
5248 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5249 GE_NO_PREFIX));
5250 if (inst.reloc.exp.X_op == O_symbol)
5251 {
5252 val = parse_reloc (&str);
5253 if (val == -1)
5254 {
5255 inst.error = _("unrecognized relocation suffix");
5256 goto failure;
5257 }
5258 else if (val != BFD_RELOC_UNUSED)
5259 {
5260 inst.operands[i].imm = val;
5261 inst.operands[i].hasreloc = 1;
5262 }
5263 }
5264 break;
5265
5266 /* Register or expression */
5267 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5268 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5269
5270 /* Register or immediate */
5271 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5272 I0: po_imm_or_fail (0, 0, FALSE); break;
5273
5274 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5275 IF:
5276 if (!is_immediate_prefix (*str))
5277 goto bad_args;
5278 str++;
5279 val = parse_fpa_immediate (&str);
5280 if (val == FAIL)
5281 goto failure;
5282 /* FPA immediates are encoded as registers 8-15.
5283 parse_fpa_immediate has already applied the offset. */
5284 inst.operands[i].reg = val;
5285 inst.operands[i].isreg = 1;
5286 break;
5287
5288 /* Two kinds of register */
5289 case OP_RIWR_RIWC:
5290 {
5291 struct reg_entry *rege = arm_reg_parse_multi (&str);
5292 if (!rege || (rege->type != REG_TYPE_MMXWR
5293 && rege->type != REG_TYPE_MMXWC
5294 && rege->type != REG_TYPE_MMXWCG))
5295 {
5296 inst.error = _("iWMMXt data or control register expected");
5297 goto failure;
5298 }
5299 inst.operands[i].reg = rege->number;
5300 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5301 }
5302 break;
5303
5304 /* Misc */
5305 case OP_CPSF: val = parse_cps_flags (&str); break;
5306 case OP_ENDI: val = parse_endian_specifier (&str); break;
5307 case OP_oROR: val = parse_ror (&str); break;
5308 case OP_PSR: val = parse_psr (&str); break;
5309 case OP_COND: val = parse_cond (&str); break;
5310 case OP_oBARRIER:val = parse_barrier (&str); break;
5311
5312 case OP_TB:
5313 po_misc_or_fail (parse_tb (&str));
5314 break;
5315
5316 /* Register lists */
5317 case OP_REGLST:
5318 val = parse_reg_list (&str);
5319 if (*str == '^')
5320 {
5321 inst.operands[1].writeback = 1;
5322 str++;
5323 }
5324 break;
5325
5326 case OP_VRSLST:
5327 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5328 break;
5329
5330 case OP_VRDLST:
5331 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5332 break;
5333
5334 case OP_NRDLST:
5335 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5336 REGLIST_NEON_D);
5337 break;
5338
5339 case OP_NSTRLST:
5340 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5341 &inst.operands[i].vectype);
5342 break;
5343
5344 /* Addressing modes */
5345 case OP_ADDR:
5346 po_misc_or_fail (parse_address (&str, i));
5347 break;
5348
5349 case OP_SH:
5350 po_misc_or_fail (parse_shifter_operand (&str, i));
5351 break;
5352
5353 case OP_oSHll:
5354 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5355 break;
5356
5357 case OP_oSHar:
5358 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5359 break;
5360
5361 case OP_oSHllar:
5362 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5363 break;
5364
5365 default:
5366 as_fatal (_("unhandled operand code %d"), upat[i]);
5367 }
5368
5369 /* Various value-based sanity checks and shared operations. We
5370 do not signal immediate failures for the register constraints;
5371 this allows a syntax error to take precedence. */
5372 switch (upat[i])
5373 {
5374 case OP_oRRnpc:
5375 case OP_RRnpc:
5376 case OP_RRnpcb:
5377 case OP_RRw:
5378 case OP_RRnpc_I0:
5379 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5380 inst.error = BAD_PC;
5381 break;
5382
5383 case OP_CPSF:
5384 case OP_ENDI:
5385 case OP_oROR:
5386 case OP_PSR:
5387 case OP_COND:
5388 case OP_oBARRIER:
5389 case OP_REGLST:
5390 case OP_VRSLST:
5391 case OP_VRDLST:
5392 case OP_NRDLST:
5393 case OP_NSTRLST:
5394 if (val == FAIL)
5395 goto failure;
5396 inst.operands[i].imm = val;
5397 break;
5398
5399 default:
5400 break;
5401 }
5402
5403 /* If we get here, this operand was successfully parsed. */
5404 inst.operands[i].present = 1;
5405 continue;
5406
5407 bad_args:
5408 inst.error = BAD_ARGS;
5409
5410 failure:
5411 if (!backtrack_pos)
5412 {
5413 /* The parse routine should already have set inst.error, but set a
5414 default here just in case. */
5415 if (!inst.error)
5416 inst.error = _("syntax error");
5417 return FAIL;
5418 }
5419
5420 /* Do not backtrack over a trailing optional argument that
5421 absorbed some text. We will only fail again, with the
5422 'garbage following instruction' error message, which is
5423 probably less helpful than the current one. */
5424 if (backtrack_index == i && backtrack_pos != str
5425 && upat[i+1] == OP_stop)
5426 {
5427 if (!inst.error)
5428 inst.error = _("syntax error");
5429 return FAIL;
5430 }
5431
5432 /* Try again, skipping the optional argument at backtrack_pos. */
5433 str = backtrack_pos;
5434 inst.error = backtrack_error;
5435 inst.operands[backtrack_index].present = 0;
5436 i = backtrack_index;
5437 backtrack_pos = 0;
5438 }
5439
5440 /* Check that we have parsed all the arguments. */
5441 if (*str != '\0' && !inst.error)
5442 inst.error = _("garbage following instruction");
5443
5444 return inst.error ? FAIL : SUCCESS;
5445 }
5446
5447 #undef po_char_or_fail
5448 #undef po_reg_or_fail
5449 #undef po_reg_or_goto
5450 #undef po_imm_or_fail
5451 #undef po_scalar_or_goto
5452 \f
5453 /* Shorthand macro for instruction encoding functions issuing errors. */
5454 #define constraint(expr, err) do { \
5455 if (expr) \
5456 { \
5457 inst.error = err; \
5458 return; \
5459 } \
5460 } while (0)
5461
5462 /* Functions for operand encoding. ARM, then Thumb. */
5463
5464 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31)) /* Safe for n == 0. */
5465
5466 /* If VAL can be encoded in the immediate field of an ARM instruction,
5467 return the encoded form. Otherwise, return FAIL. */
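/* Worked example (illustrative): 0xff000000 is representable because
   rotate_left (0xff000000, 8) == 0xff, so the function returns
   0xff | (8 << 7) == 0x4ff, i.e. imm8 = 0xff with rotate field 4
   (rotate right by 8).  A value such as 0x101 cannot fit in eight
   bits under any even rotation and yields FAIL.  */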
5468
5469 static unsigned int
5470 encode_arm_immediate (unsigned int val)
5471 {
5472 unsigned int a, i;
5473
5474 for (i = 0; i < 32; i += 2)
5475 if ((a = rotate_left (val, i)) <= 0xff)
5476 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5477
5478 return FAIL;
5479 }
5480
5481 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5482 return the encoded form. Otherwise, return FAIL. */
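/* Worked examples (illustrative): 0x00120012 matches the halfword
   replication test below and encodes as 0x100 | 0x12 == 0x112;
   0xabababab encodes as 0x300 | 0xab; a value such as 0x00003fc0 is
   handled by the shifted-byte loop instead.  */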
5483 static unsigned int
5484 encode_thumb32_immediate (unsigned int val)
5485 {
5486 unsigned int a, i;
5487
5488 if (val <= 0xff)
5489 return val;
5490
5491 for (i = 1; i <= 24; i++)
5492 {
5493 a = val >> i;
5494 if ((val & ~(0xff << i)) == 0)
5495 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5496 }
5497
5498 a = val & 0xff;
5499 if (val == ((a << 16) | a))
5500 return 0x100 | a;
5501 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5502 return 0x300 | a;
5503
5504 a = val & 0xff00;
5505 if (val == ((a << 16) | a))
5506 return 0x200 | (a >> 8);
5507
5508 return FAIL;
5509 }
5510 /* Encode a VFP SP or DP register number into inst.instruction. */
5511
5512 static void
5513 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5514 {
5515 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5516 && reg > 15)
5517 {
5518 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5519 {
5520 if (thumb_mode)
5521 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5522 fpu_vfp_ext_v3);
5523 else
5524 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5525 fpu_vfp_ext_v3);
5526 }
5527 else
5528 {
5529 first_error (_("D register out of range for selected VFP version"));
5530 return;
5531 }
5532 }
5533
5534 switch (pos)
5535 {
5536 case VFP_REG_Sd:
5537 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5538 break;
5539
5540 case VFP_REG_Sn:
5541 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5542 break;
5543
5544 case VFP_REG_Sm:
5545 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5546 break;
5547
5548 case VFP_REG_Dd:
5549 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5550 break;
5551
5552 case VFP_REG_Dn:
5553 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5554 break;
5555
5556 case VFP_REG_Dm:
5557 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5558 break;
5559
5560 default:
5561 abort ();
5562 }
5563 }
5564
5565 /* Encode a <shift> in an ARM-format instruction. The immediate,
5566 if any, is handled by md_apply_fix. */
5567 static void
5568 encode_arm_shift (int i)
5569 {
5570 if (inst.operands[i].shift_kind == SHIFT_RRX)
5571 inst.instruction |= SHIFT_ROR << 5;
5572 else
5573 {
5574 inst.instruction |= inst.operands[i].shift_kind << 5;
5575 if (inst.operands[i].immisreg)
5576 {
5577 inst.instruction |= SHIFT_BY_REG;
5578 inst.instruction |= inst.operands[i].imm << 8;
5579 }
5580 else
5581 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5582 }
5583 }
5584
5585 static void
5586 encode_arm_shifter_operand (int i)
5587 {
5588 if (inst.operands[i].isreg)
5589 {
5590 inst.instruction |= inst.operands[i].reg;
5591 encode_arm_shift (i);
5592 }
5593 else
5594 inst.instruction |= INST_IMMEDIATE;
5595 }
5596
5597 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5598 static void
5599 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5600 {
5601 assert (inst.operands[i].isreg);
5602 inst.instruction |= inst.operands[i].reg << 16;
5603
5604 if (inst.operands[i].preind)
5605 {
5606 if (is_t)
5607 {
5608 inst.error = _("instruction does not accept preindexed addressing");
5609 return;
5610 }
5611 inst.instruction |= PRE_INDEX;
5612 if (inst.operands[i].writeback)
5613 inst.instruction |= WRITE_BACK;
5614
5615 }
5616 else if (inst.operands[i].postind)
5617 {
5618 assert (inst.operands[i].writeback);
5619 if (is_t)
5620 inst.instruction |= WRITE_BACK;
5621 }
5622 else /* unindexed - only for coprocessor */
5623 {
5624 inst.error = _("instruction does not accept unindexed addressing");
5625 return;
5626 }
5627
5628 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5629 && (((inst.instruction & 0x000f0000) >> 16)
5630 == ((inst.instruction & 0x0000f000) >> 12)))
5631 as_warn ((inst.instruction & LOAD_BIT)
5632 ? _("destination register same as write-back base")
5633 : _("source register same as write-back base"));
5634 }
5635
5636 /* inst.operands[i] was set up by parse_address. Encode it into an
5637 ARM-format mode 2 load or store instruction. If is_t is true,
5638 reject forms that cannot be used with a T instruction (i.e. not
5639 post-indexed). */
5640 static void
5641 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5642 {
5643 encode_arm_addr_mode_common (i, is_t);
5644
5645 if (inst.operands[i].immisreg)
5646 {
5647 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5648 inst.instruction |= inst.operands[i].imm;
5649 if (!inst.operands[i].negative)
5650 inst.instruction |= INDEX_UP;
5651 if (inst.operands[i].shifted)
5652 {
5653 if (inst.operands[i].shift_kind == SHIFT_RRX)
5654 inst.instruction |= SHIFT_ROR << 5;
5655 else
5656 {
5657 inst.instruction |= inst.operands[i].shift_kind << 5;
5658 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5659 }
5660 }
5661 }
5662 else /* immediate offset in inst.reloc */
5663 {
5664 if (inst.reloc.type == BFD_RELOC_UNUSED)
5665 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5666 }
5667 }
5668
5669 /* inst.operands[i] was set up by parse_address. Encode it into an
5670 ARM-format mode 3 load or store instruction. Reject forms that
5671 cannot be used with such instructions. If is_t is true, reject
5672 forms that cannot be used with a T instruction (i.e. not
5673 post-indexed). */
5674 static void
5675 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5676 {
5677 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5678 {
5679 inst.error = _("instruction does not accept scaled register index");
5680 return;
5681 }
5682
5683 encode_arm_addr_mode_common (i, is_t);
5684
5685 if (inst.operands[i].immisreg)
5686 {
5687 inst.instruction |= inst.operands[i].imm;
5688 if (!inst.operands[i].negative)
5689 inst.instruction |= INDEX_UP;
5690 }
5691 else /* immediate offset in inst.reloc */
5692 {
5693 inst.instruction |= HWOFFSET_IMM;
5694 if (inst.reloc.type == BFD_RELOC_UNUSED)
5695 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5696 }
5697 }
5698
5699 /* inst.operands[i] was set up by parse_address. Encode it into an
5700 ARM-format instruction. Reject all forms which cannot be encoded
5701 into a coprocessor load/store instruction. If wb_ok is false,
5702 reject use of writeback; if unind_ok is false, reject use of
5703 unindexed addressing. If reloc_override is not 0, use it instead
5704 of BFD_RELOC_ARM_CP_OFF_IMM. */
5705
5706 static int
5707 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5708 {
5709 inst.instruction |= inst.operands[i].reg << 16;
5710
5711 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5712
5713 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5714 {
5715 assert (!inst.operands[i].writeback);
5716 if (!unind_ok)
5717 {
5718 inst.error = _("instruction does not support unindexed addressing");
5719 return FAIL;
5720 }
5721 inst.instruction |= inst.operands[i].imm;
5722 inst.instruction |= INDEX_UP;
5723 return SUCCESS;
5724 }
5725
5726 if (inst.operands[i].preind)
5727 inst.instruction |= PRE_INDEX;
5728
5729 if (inst.operands[i].writeback)
5730 {
5731 if (inst.operands[i].reg == REG_PC)
5732 {
5733 inst.error = _("pc may not be used with write-back");
5734 return FAIL;
5735 }
5736 if (!wb_ok)
5737 {
5738 inst.error = _("instruction does not support writeback");
5739 return FAIL;
5740 }
5741 inst.instruction |= WRITE_BACK;
5742 }
5743
5744 if (reloc_override)
5745 inst.reloc.type = reloc_override;
5746 else if (thumb_mode)
5747 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5748 else
5749 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5750 return SUCCESS;
5751 }
5752
5753 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5754 Determine whether it can be performed with a move instruction; if
5755 it can, convert inst.instruction to that move instruction and
5756 return 1; if it can't, convert inst.instruction to a literal-pool
5757 load and return 0. If this is not a valid thing to do in the
5758 current context, set inst.error and return 1.
5759
5760 inst.operands[i] describes the destination register. */
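/* For example (illustrative): "ldr r0, =0x31" can become
   "mov r0, #0x31", "ldr r0, =0xffffff00" can become "mvn r0, #0xff",
   while "ldr r0, =0x12345678" falls back to a PC-relative load from
   the literal pool.  */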
5761
5762 static int
5763 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5764 {
5765 unsigned long tbit;
5766
5767 if (thumb_p)
5768 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5769 else
5770 tbit = LOAD_BIT;
5771
5772 if ((inst.instruction & tbit) == 0)
5773 {
5774 inst.error = _("invalid pseudo operation");
5775 return 1;
5776 }
5777 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5778 {
5779 inst.error = _("constant expression expected");
5780 return 1;
5781 }
5782 if (inst.reloc.exp.X_op == O_constant)
5783 {
5784 if (thumb_p)
5785 {
5786 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5787 {
5788 /* This can be done with a mov(1) instruction. */
5789 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5790 inst.instruction |= inst.reloc.exp.X_add_number;
5791 return 1;
5792 }
5793 }
5794 else
5795 {
5796 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5797 if (value != FAIL)
5798 {
5799 /* This can be done with a mov instruction. */
5800 inst.instruction &= LITERAL_MASK;
5801 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5802 inst.instruction |= value & 0xfff;
5803 return 1;
5804 }
5805
5806 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5807 if (value != FAIL)
5808 {
5809 /* This can be done with a mvn instruction. */
5810 inst.instruction &= LITERAL_MASK;
5811 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5812 inst.instruction |= value & 0xfff;
5813 return 1;
5814 }
5815 }
5816 }
5817
5818 if (add_to_lit_pool () == FAIL)
5819 {
5820 inst.error = _("literal pool insertion failed");
5821 return 1;
5822 }
5823 inst.operands[1].reg = REG_PC;
5824 inst.operands[1].isreg = 1;
5825 inst.operands[1].preind = 1;
5826 inst.reloc.pc_rel = 1;
5827 inst.reloc.type = (thumb_p
5828 ? BFD_RELOC_ARM_THUMB_OFFSET
5829 : (mode_3
5830 ? BFD_RELOC_ARM_HWLITERAL
5831 : BFD_RELOC_ARM_LITERAL));
5832 return 0;
5833 }
5834
5835 /* Functions for instruction encoding, sorted by subarchitecture.
5836 First some generics; their names are taken from the conventional
5837 bit positions for register arguments in ARM format instructions. */
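/* For instance (illustrative), do_rd_rn_rm below places operand 0 in
   bits 15:12 (Rd), operand 1 in bits 19:16 (Rn) and operand 2 in
   bits 3:0 (Rm), the usual ARM data-processing register layout.  */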
5838
5839 static void
5840 do_noargs (void)
5841 {
5842 }
5843
5844 static void
5845 do_rd (void)
5846 {
5847 inst.instruction |= inst.operands[0].reg << 12;
5848 }
5849
5850 static void
5851 do_rd_rm (void)
5852 {
5853 inst.instruction |= inst.operands[0].reg << 12;
5854 inst.instruction |= inst.operands[1].reg;
5855 }
5856
5857 static void
5858 do_rd_rn (void)
5859 {
5860 inst.instruction |= inst.operands[0].reg << 12;
5861 inst.instruction |= inst.operands[1].reg << 16;
5862 }
5863
5864 static void
5865 do_rn_rd (void)
5866 {
5867 inst.instruction |= inst.operands[0].reg << 16;
5868 inst.instruction |= inst.operands[1].reg << 12;
5869 }
5870
5871 static void
5872 do_rd_rm_rn (void)
5873 {
5874 unsigned Rn = inst.operands[2].reg;
5875 /* Enforce restrictions on SWP instruction. */
5876 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5877 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5878 _("Rn must not overlap other operands"));
5879 inst.instruction |= inst.operands[0].reg << 12;
5880 inst.instruction |= inst.operands[1].reg;
5881 inst.instruction |= Rn << 16;
5882 }
5883
5884 static void
5885 do_rd_rn_rm (void)
5886 {
5887 inst.instruction |= inst.operands[0].reg << 12;
5888 inst.instruction |= inst.operands[1].reg << 16;
5889 inst.instruction |= inst.operands[2].reg;
5890 }
5891
5892 static void
5893 do_rm_rd_rn (void)
5894 {
5895 inst.instruction |= inst.operands[0].reg;
5896 inst.instruction |= inst.operands[1].reg << 12;
5897 inst.instruction |= inst.operands[2].reg << 16;
5898 }
5899
5900 static void
5901 do_imm0 (void)
5902 {
5903 inst.instruction |= inst.operands[0].imm;
5904 }
5905
5906 static void
5907 do_rd_cpaddr (void)
5908 {
5909 inst.instruction |= inst.operands[0].reg << 12;
5910 encode_arm_cp_address (1, TRUE, TRUE, 0);
5911 }
5912
5913 /* ARM instructions, in alphabetical order by function name (except
5914 that wrapper functions appear immediately after the function they
5915 wrap). */
5916
5917 /* This is a pseudo-op of the form "adr rd, label" to be converted
5918 into a relative address of the form "add rd, pc, #label-.-8". */
5919
5920 static void
5921 do_adr (void)
5922 {
5923 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5924
5925 /* Frag hacking will turn this into a sub instruction if the offset turns
5926 out to be negative. */
5927 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5928 inst.reloc.pc_rel = 1;
5929 inst.reloc.exp.X_add_number -= 8;
5930 }
5931
5932 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5933 into a relative address of the form:
5934 add rd, pc, #low(label-.-8)
5935 add rd, rd, #high(label-.-8) */
5936
5937 static void
5938 do_adrl (void)
5939 {
5940 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5941
5942 /* Frag hacking will turn this into a sub instruction if the offset turns
5943 out to be negative. */
5944 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5945 inst.reloc.pc_rel = 1;
5946 inst.size = INSN_SIZE * 2;
5947 inst.reloc.exp.X_add_number -= 8;
5948 }
5949
5950 static void
5951 do_arit (void)
5952 {
5953 if (!inst.operands[1].present)
5954 inst.operands[1].reg = inst.operands[0].reg;
5955 inst.instruction |= inst.operands[0].reg << 12;
5956 inst.instruction |= inst.operands[1].reg << 16;
5957 encode_arm_shifter_operand (2);
5958 }
5959
5960 static void
5961 do_barrier (void)
5962 {
5963 if (inst.operands[0].present)
5964 {
5965 constraint ((inst.instruction & 0xf0) != 0x40
5966 && inst.operands[0].imm != 0xf,
5967 "bad barrier type");
5968 inst.instruction |= inst.operands[0].imm;
5969 }
5970 else
5971 inst.instruction |= 0xf;
5972 }
5973
5974 static void
5975 do_bfc (void)
5976 {
5977 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
5978 constraint (msb > 32, _("bit-field extends past end of register"));
5979 /* The instruction encoding stores the LSB and MSB,
5980 not the LSB and width. */
5981 inst.instruction |= inst.operands[0].reg << 12;
5982 inst.instruction |= inst.operands[1].imm << 7;
5983 inst.instruction |= (msb - 1) << 16;
5984 }
5985
5986 static void
5987 do_bfi (void)
5988 {
5989 unsigned int msb;
5990
5991 /* #0 in second position is alternative syntax for bfc, which is
5992 the same instruction but with REG_PC in the Rm field. */
5993 if (!inst.operands[1].isreg)
5994 inst.operands[1].reg = REG_PC;
5995
5996 msb = inst.operands[2].imm + inst.operands[3].imm;
5997 constraint (msb > 32, _("bit-field extends past end of register"));
5998 /* The instruction encoding stores the LSB and MSB,
5999 not the LSB and width. */
6000 inst.instruction |= inst.operands[0].reg << 12;
6001 inst.instruction |= inst.operands[1].reg;
6002 inst.instruction |= inst.operands[2].imm << 7;
6003 inst.instruction |= (msb - 1) << 16;
6004 }
6005
6006 static void
6007 do_bfx (void)
6008 {
6009 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6010 _("bit-field extends past end of register"));
6011 inst.instruction |= inst.operands[0].reg << 12;
6012 inst.instruction |= inst.operands[1].reg;
6013 inst.instruction |= inst.operands[2].imm << 7;
6014 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6015 }
6016
6017 /* ARM V5 breakpoint instruction (argument parse)
6018 BKPT <16 bit unsigned immediate>
6019 Instruction is not conditional.
6020 The bit pattern given in insns[] has the COND_ALWAYS condition,
6021 and it is an error if the caller tried to override that. */
6022
6023 static void
6024 do_bkpt (void)
6025 {
6026 /* Top 12 of 16 bits to bits 19:8. */
6027 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6028
6029 /* Bottom 4 of 16 bits to bits 3:0. */
6030 inst.instruction |= inst.operands[0].imm & 0xf;
6031 }
6032
6033 static void
6034 encode_branch (int default_reloc)
6035 {
6036 if (inst.operands[0].hasreloc)
6037 {
6038 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6039 _("the only suffix valid here is '(plt)'"));
6040 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6041 }
6042 else
6043 {
6044 inst.reloc.type = default_reloc;
6045 }
6046 inst.reloc.pc_rel = 1;
6047 }
6048
6049 static void
6050 do_branch (void)
6051 {
6052 #ifdef OBJ_ELF
6053 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6055 else
6056 #endif
6057 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6058 }
6059
6060 static void
6061 do_bl (void)
6062 {
6063 #ifdef OBJ_ELF
6064 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6065 {
6066 if (inst.cond == COND_ALWAYS)
6067 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6068 else
6069 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6070 }
6071 else
6072 #endif
6073 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6074 }
6075
6076 /* ARM V5 branch-link-exchange instruction (argument parse)
6077 BLX <target_addr> ie BLX(1)
6078 BLX{<condition>} <Rm> ie BLX(2)
6079 Unfortunately, there are two different opcodes for this mnemonic.
6080 So, the insns[].value is not used, and the code here zaps values
6081 into inst.instruction.
6082 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6083
6084 static void
6085 do_blx (void)
6086 {
6087 if (inst.operands[0].isreg)
6088 {
6089 /* Arg is a register; the opcode provided by insns[] is correct.
6090 It is not illegal to do "blx pc", just useless. */
6091 if (inst.operands[0].reg == REG_PC)
6092 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6093
6094 inst.instruction |= inst.operands[0].reg;
6095 }
6096 else
6097 {
6098 /* Arg is an address; this instruction cannot be executed
6099 conditionally, and the opcode must be adjusted. */
6100 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6101 inst.instruction = 0xfa000000;
6102 #ifdef OBJ_ELF
6103 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6104 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6105 else
6106 #endif
6107 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6108 }
6109 }
6110
6111 static void
6112 do_bx (void)
6113 {
6114 if (inst.operands[0].reg == REG_PC)
6115 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6116
6117 inst.instruction |= inst.operands[0].reg;
6118 }
6119
6120
6121 /* ARM v5TEJ. Jump to Jazelle code. */
6122
6123 static void
6124 do_bxj (void)
6125 {
6126 if (inst.operands[0].reg == REG_PC)
6127 as_tsktsk (_("use of r15 in bxj is not really useful"));
6128
6129 inst.instruction |= inst.operands[0].reg;
6130 }
6131
6132 /* Co-processor data operation:
6133 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6134 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6135 static void
6136 do_cdp (void)
6137 {
6138 inst.instruction |= inst.operands[0].reg << 8;
6139 inst.instruction |= inst.operands[1].imm << 20;
6140 inst.instruction |= inst.operands[2].reg << 12;
6141 inst.instruction |= inst.operands[3].reg << 16;
6142 inst.instruction |= inst.operands[4].reg;
6143 inst.instruction |= inst.operands[5].imm << 5;
6144 }
6145
6146 static void
6147 do_cmp (void)
6148 {
6149 inst.instruction |= inst.operands[0].reg << 16;
6150 encode_arm_shifter_operand (1);
6151 }
6152
6153 /* Transfer between coprocessor and ARM registers.
6154 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6155 MRC2
6156 MCR{cond}
6157 MCR2
6158
6159 No special properties. */
6160
6161 static void
6162 do_co_reg (void)
6163 {
6164 inst.instruction |= inst.operands[0].reg << 8;
6165 inst.instruction |= inst.operands[1].imm << 21;
6166 inst.instruction |= inst.operands[2].reg << 12;
6167 inst.instruction |= inst.operands[3].reg << 16;
6168 inst.instruction |= inst.operands[4].reg;
6169 inst.instruction |= inst.operands[5].imm << 5;
6170 }
6171
6172 /* Transfer between coprocessor register and pair of ARM registers.
6173 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6174 MCRR2
6175 MRRC{cond}
6176 MRRC2
6177
6178 Two XScale instructions are special cases of these:
6179
6180 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6181 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6182
6183 Result unpredictable if Rd or Rn is R15. */
6184
6185 static void
6186 do_co_reg2c (void)
6187 {
6188 inst.instruction |= inst.operands[0].reg << 8;
6189 inst.instruction |= inst.operands[1].imm << 4;
6190 inst.instruction |= inst.operands[2].reg << 12;
6191 inst.instruction |= inst.operands[3].reg << 16;
6192 inst.instruction |= inst.operands[4].reg;
6193 }
6194
6195 static void
6196 do_cpsi (void)
6197 {
6198 inst.instruction |= inst.operands[0].imm << 6;
6199 inst.instruction |= inst.operands[1].imm;
6200 }
6201
6202 static void
6203 do_dbg (void)
6204 {
6205 inst.instruction |= inst.operands[0].imm;
6206 }
6207
6208 static void
6209 do_it (void)
6210 {
6211 /* There is no IT instruction in ARM mode. We
6212 process it but do not generate code for it. */
6213 inst.size = 0;
6214 }
6215
6216 static void
6217 do_ldmstm (void)
6218 {
6219 int base_reg = inst.operands[0].reg;
6220 int range = inst.operands[1].imm;
6221
6222 inst.instruction |= base_reg << 16;
6223 inst.instruction |= range;
6224
6225 if (inst.operands[1].writeback)
6226 inst.instruction |= LDM_TYPE_2_OR_3;
6227
6228 if (inst.operands[0].writeback)
6229 {
6230 inst.instruction |= WRITE_BACK;
6231 /* Check for unpredictable uses of writeback. */
6232 if (inst.instruction & LOAD_BIT)
6233 {
6234 /* Not allowed in LDM type 2. */
6235 if ((inst.instruction & LDM_TYPE_2_OR_3)
6236 && ((range & (1 << REG_PC)) == 0))
6237 as_warn (_("writeback of base register is UNPREDICTABLE"));
6238 /* Only allowed if base reg not in list for other types. */
6239 else if (range & (1 << base_reg))
6240 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6241 }
6242 else /* STM. */
6243 {
6244 /* Not allowed for type 2. */
6245 if (inst.instruction & LDM_TYPE_2_OR_3)
6246 as_warn (_("writeback of base register is UNPREDICTABLE"));
6247 /* Only allowed if base reg not in list, or first in list. */
6248 else if ((range & (1 << base_reg))
6249 && (range & ((1 << base_reg) - 1)))
6250 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6251 }
6252 }
6253 }
6254
6255 /* ARMv5TE load-consecutive (argument parse)
6256 Mode is like LDRH.
6257
6258 LDRccD R, mode
6259 STRccD R, mode. */
6260
6261 static void
6262 do_ldrd (void)
6263 {
6264 constraint (inst.operands[0].reg % 2 != 0,
6265 _("first destination register must be even"));
6266 constraint (inst.operands[1].present
6267 && inst.operands[1].reg != inst.operands[0].reg + 1,
6268 _("can only load two consecutive registers"));
6269 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6270 constraint (!inst.operands[2].isreg, _("'[' expected"));
6271
6272 if (!inst.operands[1].present)
6273 inst.operands[1].reg = inst.operands[0].reg + 1;
6274
6275 if (inst.instruction & LOAD_BIT)
6276 {
6277 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6278 register and the first register written; we have to diagnose
6279 overlap between the base and the second register written here. */
6280
6281 if (inst.operands[2].reg == inst.operands[1].reg
6282 && (inst.operands[2].writeback || inst.operands[2].postind))
6283 as_warn (_("base register written back, and overlaps "
6284 "second destination register"));
6285
6286 /* For an index-register load, the index register must not overlap the
6287 destination (even if not write-back). */
6288 else if (inst.operands[2].immisreg
6289 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6290 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6291 as_warn (_("index register overlaps destination register"));
6292 }
6293
6294 inst.instruction |= inst.operands[0].reg << 12;
6295 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6296 }
6297
6298 static void
6299 do_ldrex (void)
6300 {
6301 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6302 || inst.operands[1].postind || inst.operands[1].writeback
6303 || inst.operands[1].immisreg || inst.operands[1].shifted
6304 || inst.operands[1].negative
6305 /* This can arise if the programmer has written
6306 strex rN, rM, foo
6307 or if they have mistakenly used a register name as the last
6308 operand, e.g.:
6309 strex rN, rM, rX
6310 It is very difficult to distinguish between these two cases
6311 because "rX" might actually be a label, i.e. the register
6312 name has been occluded by a symbol of the same name. So we
6313 just generate a general 'bad addressing mode' type error
6314 message and leave it up to the programmer to discover the
6315 true cause and fix their mistake. */
6316 || (inst.operands[1].reg == REG_PC),
6317 BAD_ADDR_MODE);
6318
6319 constraint (inst.reloc.exp.X_op != O_constant
6320 || inst.reloc.exp.X_add_number != 0,
6321 _("offset must be zero in ARM encoding"));
6322
6323 inst.instruction |= inst.operands[0].reg << 12;
6324 inst.instruction |= inst.operands[1].reg << 16;
6325 inst.reloc.type = BFD_RELOC_UNUSED;
6326 }
6327
6328 static void
6329 do_ldrexd (void)
6330 {
6331 constraint (inst.operands[0].reg % 2 != 0,
6332 _("even register required"));
6333 constraint (inst.operands[1].present
6334 && inst.operands[1].reg != inst.operands[0].reg + 1,
6335 _("can only load two consecutive registers"));
6336 /* If op 1 were present and equal to PC, this function wouldn't
6337 have been called in the first place. */
6338 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6339
6340 inst.instruction |= inst.operands[0].reg << 12;
6341 inst.instruction |= inst.operands[2].reg << 16;
6342 }
6343
6344 static void
6345 do_ldst (void)
6346 {
6347 inst.instruction |= inst.operands[0].reg << 12;
6348 if (!inst.operands[1].isreg)
6349 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6350 return;
6351 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6352 }
6353
6354 static void
6355 do_ldstt (void)
6356 {
6357 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6358 reject [Rn,...]. */
6359 if (inst.operands[1].preind)
6360 {
6361 constraint (inst.reloc.exp.X_op != O_constant ||
6362 inst.reloc.exp.X_add_number != 0,
6363 _("this instruction requires a post-indexed address"));
6364
6365 inst.operands[1].preind = 0;
6366 inst.operands[1].postind = 1;
6367 inst.operands[1].writeback = 1;
6368 }
6369 inst.instruction |= inst.operands[0].reg << 12;
6370 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6371 }
6372
6373 /* Halfword and signed-byte load/store operations. */
6374
6375 static void
6376 do_ldstv4 (void)
6377 {
6378 inst.instruction |= inst.operands[0].reg << 12;
6379 if (!inst.operands[1].isreg)
6380 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6381 return;
6382 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6383 }
6384
6385 static void
6386 do_ldsttv4 (void)
6387 {
6388 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6389 reject [Rn,...]. */
6390 if (inst.operands[1].preind)
6391 {
6392 constraint (inst.reloc.exp.X_op != O_constant ||
6393 inst.reloc.exp.X_add_number != 0,
6394 _("this instruction requires a post-indexed address"));
6395
6396 inst.operands[1].preind = 0;
6397 inst.operands[1].postind = 1;
6398 inst.operands[1].writeback = 1;
6399 }
6400 inst.instruction |= inst.operands[0].reg << 12;
6401 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6402 }
6403
6404 /* Co-processor register load/store.
6405 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6406 static void
6407 do_lstc (void)
6408 {
6409 inst.instruction |= inst.operands[0].reg << 8;
6410 inst.instruction |= inst.operands[1].reg << 12;
6411 encode_arm_cp_address (2, TRUE, TRUE, 0);
6412 }
6413
6414 static void
6415 do_mlas (void)
6416 {
6417 /* This restriction does not apply to mls (nor to mla in v6, but
6418 that's hard to detect at present). */
6419 if (inst.operands[0].reg == inst.operands[1].reg
6420 && !(inst.instruction & 0x00400000))
6421 as_tsktsk (_("rd and rm should be different in mla"));
6422
6423 inst.instruction |= inst.operands[0].reg << 16;
6424 inst.instruction |= inst.operands[1].reg;
6425 inst.instruction |= inst.operands[2].reg << 8;
6426 inst.instruction |= inst.operands[3].reg << 12;
6427
6428 }
6429
6430 static void
6431 do_mov (void)
6432 {
6433 inst.instruction |= inst.operands[0].reg << 12;
6434 encode_arm_shifter_operand (1);
6435 }
6436
6437 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6438 static void
6439 do_mov16 (void)
6440 {
6441 inst.instruction |= inst.operands[0].reg << 12;
6442 /* The value is in two pieces: 0:11, 16:19. */
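/* For example, #0x1234 is split into 0x234 (instruction bits 0-11) and
   0x1 (bits 16-19), matching the mask-and-shift below.  */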
6443 inst.instruction |= (inst.operands[1].imm & 0x00000fff);
6444 inst.instruction |= (inst.operands[1].imm & 0x0000f000) << 4;
6445 }
6446
6447 static void
6448 do_mrs (void)
6449 {
6450 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6451 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6452 != (PSR_c|PSR_f),
6453 _("'CPSR' or 'SPSR' expected"));
6454 inst.instruction |= inst.operands[0].reg << 12;
6455 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6456 }
6457
6458 /* Two possible forms:
6459 "{C|S}PSR_<field>, Rm",
6460 "{C|S}PSR_f, #expression". */
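/* E.g. "msr CPSR_c, r1" selects the register form; "msr CPSR_f, #0xf0000000"
   takes the immediate path and gets a BFD_RELOC_ARM_IMMEDIATE fixup
   (illustrative examples only).  */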
6461
6462 static void
6463 do_msr (void)
6464 {
6465 inst.instruction |= inst.operands[0].imm;
6466 if (inst.operands[1].isreg)
6467 inst.instruction |= inst.operands[1].reg;
6468 else
6469 {
6470 inst.instruction |= INST_IMMEDIATE;
6471 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6472 inst.reloc.pc_rel = 0;
6473 }
6474 }
6475
6476 static void
6477 do_mul (void)
6478 {
6479 if (!inst.operands[2].present)
6480 inst.operands[2].reg = inst.operands[0].reg;
6481 inst.instruction |= inst.operands[0].reg << 16;
6482 inst.instruction |= inst.operands[1].reg;
6483 inst.instruction |= inst.operands[2].reg << 8;
6484
6485 if (inst.operands[0].reg == inst.operands[1].reg)
6486 as_tsktsk (_("rd and rm should be different in mul"));
6487 }
6488
6489 /* Long Multiply Parser
6490 UMULL RdLo, RdHi, Rm, Rs
6491 SMULL RdLo, RdHi, Rm, Rs
6492 UMLAL RdLo, RdHi, Rm, Rs
6493 SMLAL RdLo, RdHi, Rm, Rs. */
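/* E.g. "umull r0, r1, r2, r3" puts RdLo (r0) in bits 12-15, RdHi (r1) in
   bits 16-19, Rm (r2) in bits 0-3 and Rs (r3) in bits 8-11.  */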
6494
6495 static void
6496 do_mull (void)
6497 {
6498 inst.instruction |= inst.operands[0].reg << 12;
6499 inst.instruction |= inst.operands[1].reg << 16;
6500 inst.instruction |= inst.operands[2].reg;
6501 inst.instruction |= inst.operands[3].reg << 8;
6502
6503 /* rdhi, rdlo and rm must all be different. */
6504 if (inst.operands[0].reg == inst.operands[1].reg
6505 || inst.operands[0].reg == inst.operands[2].reg
6506 || inst.operands[1].reg == inst.operands[2].reg)
6507 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6508 }
6509
6510 static void
6511 do_nop (void)
6512 {
6513 if (inst.operands[0].present)
6514 {
6515 /* Architectural NOP hints are CPSR sets with no bits selected. */
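/* E.g. an operand of 1 selects YIELD (0x0320f001), 2 WFE, 3 WFI and
   4 SEV; 0 gives the plain architectural NOP (illustrative).  */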
6516 inst.instruction &= 0xf0000000;
6517 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6518 }
6519 }
6520
6521 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6522 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6523 Condition defaults to COND_ALWAYS.
6524 Error if Rd, Rn or Rm are R15. */
6525
6526 static void
6527 do_pkhbt (void)
6528 {
6529 inst.instruction |= inst.operands[0].reg << 12;
6530 inst.instruction |= inst.operands[1].reg << 16;
6531 inst.instruction |= inst.operands[2].reg;
6532 if (inst.operands[3].present)
6533 encode_arm_shift (3);
6534 }
6535
6536 /* ARM V6 PKHTB (Argument Parse). */
6537
6538 static void
6539 do_pkhtb (void)
6540 {
6541 if (!inst.operands[3].present)
6542 {
6543 /* If the shift specifier is omitted, turn the instruction
6544 into pkhbt rd, rm, rn. */
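/* E.g. "pkhtb r0, r1, r2" with no shift is encoded as "pkhbt r0, r2, r1".  */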
6545 inst.instruction &= 0xfff00010;
6546 inst.instruction |= inst.operands[0].reg << 12;
6547 inst.instruction |= inst.operands[1].reg;
6548 inst.instruction |= inst.operands[2].reg << 16;
6549 }
6550 else
6551 {
6552 inst.instruction |= inst.operands[0].reg << 12;
6553 inst.instruction |= inst.operands[1].reg << 16;
6554 inst.instruction |= inst.operands[2].reg;
6555 encode_arm_shift (3);
6556 }
6557 }
6558
6559 /* ARMv5TE: Preload-Cache
6560
6561 PLD <addr_mode>
6562
6563 Syntactically, like LDR with B=1, W=0, L=1. */
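/* E.g. "pld [r1, #32]" is accepted; post-indexed or writeback forms such as
   "pld [r1], #32" are rejected by the constraints below.  */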
6564
6565 static void
6566 do_pld (void)
6567 {
6568 constraint (!inst.operands[0].isreg,
6569 _("'[' expected after PLD mnemonic"));
6570 constraint (inst.operands[0].postind,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst.operands[0].writeback,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst.operands[0].preind,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6577 }
6578
6579 /* ARMv7: PLI <addr_mode> */
6580 static void
6581 do_pli (void)
6582 {
6583 constraint (!inst.operands[0].isreg,
6584 _("'[' expected after PLI mnemonic"));
6585 constraint (inst.operands[0].postind,
6586 _("post-indexed expression used in preload instruction"));
6587 constraint (inst.operands[0].writeback,
6588 _("writeback used in preload instruction"));
6589 constraint (!inst.operands[0].preind,
6590 _("unindexed addressing used in preload instruction"));
6591 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6592 inst.instruction &= ~PRE_INDEX;
6593 }
6594
6595 static void
6596 do_push_pop (void)
6597 {
6598 inst.operands[1] = inst.operands[0];
6599 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6600 inst.operands[0].isreg = 1;
6601 inst.operands[0].writeback = 1;
6602 inst.operands[0].reg = REG_SP;
6603 do_ldmstm ();
6604 }
6605
6606 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6607 word at the specified address and the following word
6608 respectively.
6609 Unconditionally executed.
6610 Error if Rn is R15. */
6611
6612 static void
6613 do_rfe (void)
6614 {
6615 inst.instruction |= inst.operands[0].reg << 16;
6616 if (inst.operands[0].writeback)
6617 inst.instruction |= WRITE_BACK;
6618 }
6619
6620 /* ARM V6 ssat (argument parse). */
6621
6622 static void
6623 do_ssat (void)
6624 {
6625 inst.instruction |= inst.operands[0].reg << 12;
6626 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6627 inst.instruction |= inst.operands[2].reg;
6628
6629 if (inst.operands[3].present)
6630 encode_arm_shift (3);
6631 }
6632
6633 /* ARM V6 usat (argument parse). */
6634
6635 static void
6636 do_usat (void)
6637 {
6638 inst.instruction |= inst.operands[0].reg << 12;
6639 inst.instruction |= inst.operands[1].imm << 16;
6640 inst.instruction |= inst.operands[2].reg;
6641
6642 if (inst.operands[3].present)
6643 encode_arm_shift (3);
6644 }
6645
6646 /* ARM V6 ssat16 (argument parse). */
6647
6648 static void
6649 do_ssat16 (void)
6650 {
6651 inst.instruction |= inst.operands[0].reg << 12;
6652 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6653 inst.instruction |= inst.operands[2].reg;
6654 }
6655
6656 static void
6657 do_usat16 (void)
6658 {
6659 inst.instruction |= inst.operands[0].reg << 12;
6660 inst.instruction |= inst.operands[1].imm << 16;
6661 inst.instruction |= inst.operands[2].reg;
6662 }
6663
6664 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6665 preserving the other bits.
6666
6667 setend <endian_specifier>, where <endian_specifier> is either
6668 BE or LE. */
6669
6670 static void
6671 do_setend (void)
6672 {
6673 if (inst.operands[0].imm)
6674 inst.instruction |= 0x200;
6675 }
6676
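/* Shift mnemonics such as "lsl Rd, Rm, #imm" or "lsl Rd, Rm, Rs".  If only
   one register is given it serves as both Rd and Rm (see below).  */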
6677 static void
6678 do_shift (void)
6679 {
6680 unsigned int Rm = (inst.operands[1].present
6681 ? inst.operands[1].reg
6682 : inst.operands[0].reg);
6683
6684 inst.instruction |= inst.operands[0].reg << 12;
6685 inst.instruction |= Rm;
6686 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6687 {
6688 inst.instruction |= inst.operands[2].reg << 8;
6689 inst.instruction |= SHIFT_BY_REG;
6690 }
6691 else
6692 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6693 }
6694
6695 static void
6696 do_smc (void)
6697 {
6698 inst.reloc.type = BFD_RELOC_ARM_SMC;
6699 inst.reloc.pc_rel = 0;
6700 }
6701
6702 static void
6703 do_swi (void)
6704 {
6705 inst.reloc.type = BFD_RELOC_ARM_SWI;
6706 inst.reloc.pc_rel = 0;
6707 }
6708
6709 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6710 SMLAxy{cond} Rd,Rm,Rs,Rn
6711 SMLAWy{cond} Rd,Rm,Rs,Rn
6712 Error if any register is R15. */
6713
6714 static void
6715 do_smla (void)
6716 {
6717 inst.instruction |= inst.operands[0].reg << 16;
6718 inst.instruction |= inst.operands[1].reg;
6719 inst.instruction |= inst.operands[2].reg << 8;
6720 inst.instruction |= inst.operands[3].reg << 12;
6721 }
6722
6723 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6724 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6725 Error if any register is R15.
6726 Warning if Rdlo == Rdhi. */
6727
6728 static void
6729 do_smlal (void)
6730 {
6731 inst.instruction |= inst.operands[0].reg << 12;
6732 inst.instruction |= inst.operands[1].reg << 16;
6733 inst.instruction |= inst.operands[2].reg;
6734 inst.instruction |= inst.operands[3].reg << 8;
6735
6736 if (inst.operands[0].reg == inst.operands[1].reg)
6737 as_tsktsk (_("rdhi and rdlo must be different"));
6738 }
6739
6740 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6741 SMULxy{cond} Rd,Rm,Rs
6742 Error if any register is R15. */
6743
6744 static void
6745 do_smul (void)
6746 {
6747 inst.instruction |= inst.operands[0].reg << 16;
6748 inst.instruction |= inst.operands[1].reg;
6749 inst.instruction |= inst.operands[2].reg << 8;
6750 }
6751
6752 /* ARM V6 srs (argument parse). */
6753
6754 static void
6755 do_srs (void)
6756 {
6757 inst.instruction |= inst.operands[0].imm;
6758 if (inst.operands[0].writeback)
6759 inst.instruction |= WRITE_BACK;
6760 }
6761
6762 /* ARM V6 strex (argument parse). */
6763
6764 static void
6765 do_strex (void)
6766 {
6767 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6768 || inst.operands[2].postind || inst.operands[2].writeback
6769 || inst.operands[2].immisreg || inst.operands[2].shifted
6770 || inst.operands[2].negative
6771 /* See comment in do_ldrex(). */
6772 || (inst.operands[2].reg == REG_PC),
6773 BAD_ADDR_MODE);
6774
6775 constraint (inst.operands[0].reg == inst.operands[1].reg
6776 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6777
6778 constraint (inst.reloc.exp.X_op != O_constant
6779 || inst.reloc.exp.X_add_number != 0,
6780 _("offset must be zero in ARM encoding"));
6781
6782 inst.instruction |= inst.operands[0].reg << 12;
6783 inst.instruction |= inst.operands[1].reg;
6784 inst.instruction |= inst.operands[2].reg << 16;
6785 inst.reloc.type = BFD_RELOC_UNUSED;
6786 }
6787
6788 static void
6789 do_strexd (void)
6790 {
6791 constraint (inst.operands[1].reg % 2 != 0,
6792 _("even register required"));
6793 constraint (inst.operands[2].present
6794 && inst.operands[2].reg != inst.operands[1].reg + 1,
6795 _("can only store two consecutive registers"));
6796 /* If op 2 were present and equal to PC, this function wouldn't
6797 have been called in the first place. */
6798 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6799
6800 constraint (inst.operands[0].reg == inst.operands[1].reg
6801 || inst.operands[0].reg == inst.operands[1].reg + 1
6802 || inst.operands[0].reg == inst.operands[3].reg,
6803 BAD_OVERLAP);
6804
6805 inst.instruction |= inst.operands[0].reg << 12;
6806 inst.instruction |= inst.operands[1].reg;
6807 inst.instruction |= inst.operands[3].reg << 16;
6808 }
6809
6810 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6811 extends it to 32-bits, and adds the result to a value in another
6812 register. You can specify a rotation by 0, 8, 16, or 24 bits
6813 before extracting the 16-bit value.
6814 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6817
6818 static void
6819 do_sxtah (void)
6820 {
6821 inst.instruction |= inst.operands[0].reg << 12;
6822 inst.instruction |= inst.operands[1].reg << 16;
6823 inst.instruction |= inst.operands[2].reg;
6824 inst.instruction |= inst.operands[3].imm << 10;
6825 }
6826
6827 /* ARM V6 SXTH.
6828
6829 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6830 Condition defaults to COND_ALWAYS.
6831 Error if any register uses R15. */
6832
6833 static void
6834 do_sxth (void)
6835 {
6836 inst.instruction |= inst.operands[0].reg << 12;
6837 inst.instruction |= inst.operands[1].reg;
6838 inst.instruction |= inst.operands[2].imm << 10;
6839 }
6840 \f
6841 /* VFP instructions. In a logical order: SP variant first, monad
6842 before dyad, arithmetic then move then load/store. */
6843
6844 static void
6845 do_vfp_sp_monadic (void)
6846 {
6847 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6848 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6849 }
6850
6851 static void
6852 do_vfp_sp_dyadic (void)
6853 {
6854 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6855 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6856 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6857 }
6858
6859 static void
6860 do_vfp_sp_compare_z (void)
6861 {
6862 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6863 }
6864
6865 static void
6866 do_vfp_dp_sp_cvt (void)
6867 {
6868 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6869 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6870 }
6871
6872 static void
6873 do_vfp_sp_dp_cvt (void)
6874 {
6875 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6876 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6877 }
6878
6879 static void
6880 do_vfp_reg_from_sp (void)
6881 {
6882 inst.instruction |= inst.operands[0].reg << 12;
6883 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6884 }
6885
6886 static void
6887 do_vfp_reg2_from_sp2 (void)
6888 {
6889 constraint (inst.operands[2].imm != 2,
6890 _("only two consecutive VFP SP registers allowed here"));
6891 inst.instruction |= inst.operands[0].reg << 12;
6892 inst.instruction |= inst.operands[1].reg << 16;
6893 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6894 }
6895
6896 static void
6897 do_vfp_sp_from_reg (void)
6898 {
6899 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6900 inst.instruction |= inst.operands[1].reg << 12;
6901 }
6902
6903 static void
6904 do_vfp_sp2_from_reg2 (void)
6905 {
6906 constraint (inst.operands[0].imm != 2,
6907 _("only two consecutive VFP SP registers allowed here"));
6908 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6909 inst.instruction |= inst.operands[1].reg << 12;
6910 inst.instruction |= inst.operands[2].reg << 16;
6911 }
6912
6913 static void
6914 do_vfp_sp_ldst (void)
6915 {
6916 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6917 encode_arm_cp_address (1, FALSE, TRUE, 0);
6918 }
6919
6920 static void
6921 do_vfp_dp_ldst (void)
6922 {
6923 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6924 encode_arm_cp_address (1, FALSE, TRUE, 0);
6925 }
6926
6927
6928 static void
6929 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6930 {
6931 if (inst.operands[0].writeback)
6932 inst.instruction |= WRITE_BACK;
6933 else
6934 constraint (ldstm_type != VFP_LDSTMIA,
6935 _("this addressing mode requires base-register writeback"));
6936 inst.instruction |= inst.operands[0].reg << 16;
6937 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6938 inst.instruction |= inst.operands[1].imm;
6939 }
6940
6941 static void
6942 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6943 {
6944 int count;
6945
6946 if (inst.operands[0].writeback)
6947 inst.instruction |= WRITE_BACK;
6948 else
6949 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
6950 _("this addressing mode requires base-register writeback"));
6951
6952 inst.instruction |= inst.operands[0].reg << 16;
6953 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6954
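/* The transfer-length field counts words: two per D register, plus one
   extra word for the FLDMX/FSTMX formats, as computed below.  */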
6955 count = inst.operands[1].imm << 1;
6956 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
6957 count += 1;
6958
6959 inst.instruction |= count;
6960 }
6961
6962 static void
6963 do_vfp_sp_ldstmia (void)
6964 {
6965 vfp_sp_ldstm (VFP_LDSTMIA);
6966 }
6967
6968 static void
6969 do_vfp_sp_ldstmdb (void)
6970 {
6971 vfp_sp_ldstm (VFP_LDSTMDB);
6972 }
6973
6974 static void
6975 do_vfp_dp_ldstmia (void)
6976 {
6977 vfp_dp_ldstm (VFP_LDSTMIA);
6978 }
6979
6980 static void
6981 do_vfp_dp_ldstmdb (void)
6982 {
6983 vfp_dp_ldstm (VFP_LDSTMDB);
6984 }
6985
6986 static void
6987 do_vfp_xp_ldstmia (void)
6988 {
6989 vfp_dp_ldstm (VFP_LDSTMIAX);
6990 }
6991
6992 static void
6993 do_vfp_xp_ldstmdb (void)
6994 {
6995 vfp_dp_ldstm (VFP_LDSTMDBX);
6996 }
6997
6998 static void
6999 do_vfp_dp_rd_rm (void)
7000 {
7001 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7002 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7003 }
7004
7005 static void
7006 do_vfp_dp_rn_rd (void)
7007 {
7008 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7009 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7010 }
7011
7012 static void
7013 do_vfp_dp_rd_rn (void)
7014 {
7015 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7016 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7017 }
7018
7019 static void
7020 do_vfp_dp_rd_rn_rm (void)
7021 {
7022 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7023 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7024 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7025 }
7026
7027 static void
7028 do_vfp_dp_rd (void)
7029 {
7030 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7031 }
7032
7033 static void
7034 do_vfp_dp_rm_rd_rn (void)
7035 {
7036 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7037 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7038 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7039 }
7040
7041 /* VFPv3 instructions. */
7042 static void
7043 do_vfp_sp_const (void)
7044 {
7045 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7046 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7047 inst.instruction |= (inst.operands[1].imm >> 4);
7048 }
7049
7050 static void
7051 do_vfp_dp_const (void)
7052 {
7053 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7054 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7055 inst.instruction |= (inst.operands[1].imm >> 4);
7056 }
7057
7058 static void
7059 vfp_conv (int srcsize)
7060 {
7061 unsigned immbits = srcsize - inst.operands[1].imm;
7062 inst.instruction |= (immbits & 1) << 5;
7063 inst.instruction |= (immbits >> 1);
7064 }
7065
7066 static void
7067 do_vfp_sp_conv_16 (void)
7068 {
7069 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7070 vfp_conv (16);
7071 }
7072
7073 static void
7074 do_vfp_dp_conv_16 (void)
7075 {
7076 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7077 vfp_conv (16);
7078 }
7079
7080 static void
7081 do_vfp_sp_conv_32 (void)
7082 {
7083 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7084 vfp_conv (32);
7085 }
7086
7087 static void
7088 do_vfp_dp_conv_32 (void)
7089 {
7090 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7091 vfp_conv (32);
7092 }
7093
7094 \f
7095 /* FPA instructions. Also in a logical order. */
7096
7097 static void
7098 do_fpa_cmp (void)
7099 {
7100 inst.instruction |= inst.operands[0].reg << 16;
7101 inst.instruction |= inst.operands[1].reg;
7102 }
7103
7104 static void
7105 do_fpa_ldmstm (void)
7106 {
7107 inst.instruction |= inst.operands[0].reg << 12;
7108 switch (inst.operands[1].imm)
7109 {
7110 case 1: inst.instruction |= CP_T_X; break;
7111 case 2: inst.instruction |= CP_T_Y; break;
7112 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7113 case 4: break;
7114 default: abort ();
7115 }
7116
7117 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7118 {
7119 /* The instruction specified "ea" or "fd", so we can only accept
7120 [Rn]{!}. The instruction does not really support stacking or
7121 unstacking, so we have to emulate these by setting appropriate
7122 bits and offsets. */
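/* For instance, a four-register transfer uses an offset of 4 * 12 = 48
   bytes (12 bytes per FPA register here), negated when the index
   direction is down.  */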
7123 constraint (inst.reloc.exp.X_op != O_constant
7124 || inst.reloc.exp.X_add_number != 0,
7125 _("this instruction does not support indexing"));
7126
7127 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7128 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7129
7130 if (!(inst.instruction & INDEX_UP))
7131 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7132
7133 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7134 {
7135 inst.operands[2].preind = 0;
7136 inst.operands[2].postind = 1;
7137 }
7138 }
7139
7140 encode_arm_cp_address (2, TRUE, TRUE, 0);
7141 }
7142 \f
7143 /* iWMMXt instructions: strictly in alphabetical order. */
7144
7145 static void
7146 do_iwmmxt_tandorc (void)
7147 {
7148 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7149 }
7150
7151 static void
7152 do_iwmmxt_textrc (void)
7153 {
7154 inst.instruction |= inst.operands[0].reg << 12;
7155 inst.instruction |= inst.operands[1].imm;
7156 }
7157
7158 static void
7159 do_iwmmxt_textrm (void)
7160 {
7161 inst.instruction |= inst.operands[0].reg << 12;
7162 inst.instruction |= inst.operands[1].reg << 16;
7163 inst.instruction |= inst.operands[2].imm;
7164 }
7165
7166 static void
7167 do_iwmmxt_tinsr (void)
7168 {
7169 inst.instruction |= inst.operands[0].reg << 16;
7170 inst.instruction |= inst.operands[1].reg << 12;
7171 inst.instruction |= inst.operands[2].imm;
7172 }
7173
7174 static void
7175 do_iwmmxt_tmia (void)
7176 {
7177 inst.instruction |= inst.operands[0].reg << 5;
7178 inst.instruction |= inst.operands[1].reg;
7179 inst.instruction |= inst.operands[2].reg << 12;
7180 }
7181
7182 static void
7183 do_iwmmxt_waligni (void)
7184 {
7185 inst.instruction |= inst.operands[0].reg << 12;
7186 inst.instruction |= inst.operands[1].reg << 16;
7187 inst.instruction |= inst.operands[2].reg;
7188 inst.instruction |= inst.operands[3].imm << 20;
7189 }
7190
7191 static void
7192 do_iwmmxt_wmov (void)
7193 {
7194 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7195 inst.instruction |= inst.operands[0].reg << 12;
7196 inst.instruction |= inst.operands[1].reg << 16;
7197 inst.instruction |= inst.operands[1].reg;
7198 }
7199
7200 static void
7201 do_iwmmxt_wldstbh (void)
7202 {
7203 int reloc;
7204 inst.instruction |= inst.operands[0].reg << 12;
7205 if (thumb_mode)
7206 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7207 else
7208 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7209 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7210 }
7211
7212 static void
7213 do_iwmmxt_wldstw (void)
7214 {
7215 /* RIWR_RIWC clears .isreg for a control register. */
7216 if (!inst.operands[0].isreg)
7217 {
7218 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7219 inst.instruction |= 0xf0000000;
7220 }
7221
7222 inst.instruction |= inst.operands[0].reg << 12;
7223 encode_arm_cp_address (1, TRUE, TRUE, 0);
7224 }
7225
7226 static void
7227 do_iwmmxt_wldstd (void)
7228 {
7229 inst.instruction |= inst.operands[0].reg << 12;
7230 encode_arm_cp_address (1, TRUE, FALSE, 0);
7231 }
7232
7233 static void
7234 do_iwmmxt_wshufh (void)
7235 {
7236 inst.instruction |= inst.operands[0].reg << 12;
7237 inst.instruction |= inst.operands[1].reg << 16;
7238 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7239 inst.instruction |= (inst.operands[2].imm & 0x0f);
7240 }
7241
7242 static void
7243 do_iwmmxt_wzero (void)
7244 {
7245 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7246 inst.instruction |= inst.operands[0].reg;
7247 inst.instruction |= inst.operands[0].reg << 12;
7248 inst.instruction |= inst.operands[0].reg << 16;
7249 }
7250 \f
7251 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7252 operations first, then control, shift, and load/store. */
7253
7254 /* Insns like "foo X,Y,Z". */
7255
7256 static void
7257 do_mav_triple (void)
7258 {
7259 inst.instruction |= inst.operands[0].reg << 16;
7260 inst.instruction |= inst.operands[1].reg;
7261 inst.instruction |= inst.operands[2].reg << 12;
7262 }
7263
7264 /* Insns like "foo W,X,Y,Z",
7265 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7266
7267 static void
7268 do_mav_quad (void)
7269 {
7270 inst.instruction |= inst.operands[0].reg << 5;
7271 inst.instruction |= inst.operands[1].reg << 12;
7272 inst.instruction |= inst.operands[2].reg << 16;
7273 inst.instruction |= inst.operands[3].reg;
7274 }
7275
7276 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7277 static void
7278 do_mav_dspsc (void)
7279 {
7280 inst.instruction |= inst.operands[1].reg << 12;
7281 }
7282
7283 /* Maverick shift immediate instructions.
7284 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7285 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7286
7287 static void
7288 do_mav_shift (void)
7289 {
7290 int imm = inst.operands[2].imm;
7291
7292 inst.instruction |= inst.operands[0].reg << 12;
7293 inst.instruction |= inst.operands[1].reg << 16;
7294
7295 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7296 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7297 Bit 4 should be 0. */
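/* For example, an immediate of 0x25 (binary 010 0101) is repacked as
   (0x25 & 0xf) | ((0x25 & 0x70) << 1) == 0x45.  */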
7298 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7299
7300 inst.instruction |= imm;
7301 }
7302 \f
7303 /* XScale instructions. Also sorted arithmetic before move. */
7304
7305 /* Xscale multiply-accumulate (argument parse)
7306 MIAcc acc0,Rm,Rs
7307 MIAPHcc acc0,Rm,Rs
7308 MIAxycc acc0,Rm,Rs. */
7309
7310 static void
7311 do_xsc_mia (void)
7312 {
7313 inst.instruction |= inst.operands[1].reg;
7314 inst.instruction |= inst.operands[2].reg << 12;
7315 }
7316
7317 /* Xscale move-accumulator-register (argument parse)
7318
7319 MARcc acc0,RdLo,RdHi. */
7320
7321 static void
7322 do_xsc_mar (void)
7323 {
7324 inst.instruction |= inst.operands[1].reg << 12;
7325 inst.instruction |= inst.operands[2].reg << 16;
7326 }
7327
7328 /* Xscale move-register-accumulator (argument parse)
7329
7330 MRAcc RdLo,RdHi,acc0. */
7331
7332 static void
7333 do_xsc_mra (void)
7334 {
7335 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7336 inst.instruction |= inst.operands[0].reg << 12;
7337 inst.instruction |= inst.operands[1].reg << 16;
7338 }
7339 \f
7340 /* Encoding functions relevant only to Thumb. */
7341
7342 /* inst.operands[i] is a shifted-register operand; encode
7343 it into inst.instruction in the format used by Thumb32. */
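/* A shift amount of 32 (valid for ASR and LSR) is encoded as 0, and an
   amount of 0 degenerates to LSL #0; see the special cases below.  */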
7344
7345 static void
7346 encode_thumb32_shifted_operand (int i)
7347 {
7348 unsigned int value = inst.reloc.exp.X_add_number;
7349 unsigned int shift = inst.operands[i].shift_kind;
7350
7351 constraint (inst.operands[i].immisreg,
7352 _("shift by register not allowed in thumb mode"));
7353 inst.instruction |= inst.operands[i].reg;
7354 if (shift == SHIFT_RRX)
7355 inst.instruction |= SHIFT_ROR << 4;
7356 else
7357 {
7358 constraint (inst.reloc.exp.X_op != O_constant,
7359 _("expression too complex"));
7360
7361 constraint (value > 32
7362 || (value == 32 && (shift == SHIFT_LSL
7363 || shift == SHIFT_ROR)),
7364 _("shift expression is too large"));
7365
7366 if (value == 0)
7367 shift = SHIFT_LSL;
7368 else if (value == 32)
7369 value = 0;
7370
7371 inst.instruction |= shift << 4;
7372 inst.instruction |= (value & 0x1c) << 10;
7373 inst.instruction |= (value & 0x03) << 6;
7374 }
7375 }
7376
7377
7378 /* inst.operands[i] was set up by parse_address. Encode it into a
7379 Thumb32 format load or store instruction. Reject forms that cannot
7380 be used with such instructions. If is_t is true, reject forms that
7381 cannot be used with a T instruction; if is_d is true, reject forms
7382 that cannot be used with a D instruction. */
7383
7384 static void
7385 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7386 {
7387 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7388
7389 constraint (!inst.operands[i].isreg,
7390 _("Instruction does not support =N addresses"));
7391
7392 inst.instruction |= inst.operands[i].reg << 16;
7393 if (inst.operands[i].immisreg)
7394 {
7395 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7396 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7397 constraint (inst.operands[i].negative,
7398 _("Thumb does not support negative register indexing"));
7399 constraint (inst.operands[i].postind,
7400 _("Thumb does not support register post-indexing"));
7401 constraint (inst.operands[i].writeback,
7402 _("Thumb does not support register indexing with writeback"));
7403 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7404 _("Thumb supports only LSL in shifted register indexing"));
7405
7406 inst.instruction |= inst.operands[i].imm;
7407 if (inst.operands[i].shifted)
7408 {
7409 constraint (inst.reloc.exp.X_op != O_constant,
7410 _("expression too complex"));
7411 constraint (inst.reloc.exp.X_add_number < 0
7412 || inst.reloc.exp.X_add_number > 3,
7413 _("shift out of range"));
7414 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7415 }
7416 inst.reloc.type = BFD_RELOC_UNUSED;
7417 }
7418 else if (inst.operands[i].preind)
7419 {
7420 constraint (is_pc && inst.operands[i].writeback,
7421 _("cannot use writeback with PC-relative addressing"));
7422 constraint (is_t && inst.operands[i].writeback,
7423 _("cannot use writeback with this instruction"));
7424
7425 if (is_d)
7426 {
7427 inst.instruction |= 0x01000000;
7428 if (inst.operands[i].writeback)
7429 inst.instruction |= 0x00200000;
7430 }
7431 else
7432 {
7433 inst.instruction |= 0x00000c00;
7434 if (inst.operands[i].writeback)
7435 inst.instruction |= 0x00000100;
7436 }
7437 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7438 }
7439 else if (inst.operands[i].postind)
7440 {
7441 assert (inst.operands[i].writeback);
7442 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7443 constraint (is_t, _("cannot use post-indexing with this instruction"));
7444
7445 if (is_d)
7446 inst.instruction |= 0x00200000;
7447 else
7448 inst.instruction |= 0x00000900;
7449 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7450 }
7451 else /* unindexed - only for coprocessor */
7452 inst.error = _("instruction does not accept unindexed addressing");
7453 }
7454
7455 /* Table of Thumb instructions which exist in both 16- and 32-bit
7456 encodings (the latter only in post-V6T2 cores). The index is the
7457 value used in the insns table below. When there is more than one
7458 possible 16-bit encoding for the instruction, this table always
7459 holds variant (1).
7460 Also contains several pseudo-instructions used during relaxation. */
7461 #define T16_32_TAB \
7462 X(adc, 4140, eb400000), \
7463 X(adcs, 4140, eb500000), \
7464 X(add, 1c00, eb000000), \
7465 X(adds, 1c00, eb100000), \
7466 X(addi, 0000, f1000000), \
7467 X(addis, 0000, f1100000), \
7468 X(add_pc,000f, f20f0000), \
7469 X(add_sp,000d, f10d0000), \
7470 X(adr, 000f, f20f0000), \
7471 X(and, 4000, ea000000), \
7472 X(ands, 4000, ea100000), \
7473 X(asr, 1000, fa40f000), \
7474 X(asrs, 1000, fa50f000), \
7475 X(b, e000, f000b000), \
7476 X(bcond, d000, f0008000), \
7477 X(bic, 4380, ea200000), \
7478 X(bics, 4380, ea300000), \
7479 X(cmn, 42c0, eb100f00), \
7480 X(cmp, 2800, ebb00f00), \
7481 X(cpsie, b660, f3af8400), \
7482 X(cpsid, b670, f3af8600), \
7483 X(cpy, 4600, ea4f0000), \
7484 X(dec_sp,80dd, f1bd0d00), \
7485 X(eor, 4040, ea800000), \
7486 X(eors, 4040, ea900000), \
7487 X(inc_sp,00dd, f10d0d00), \
7488 X(ldmia, c800, e8900000), \
7489 X(ldr, 6800, f8500000), \
7490 X(ldrb, 7800, f8100000), \
7491 X(ldrh, 8800, f8300000), \
7492 X(ldrsb, 5600, f9100000), \
7493 X(ldrsh, 5e00, f9300000), \
7494 X(ldr_pc,4800, f85f0000), \
7495 X(ldr_pc2,4800, f85f0000), \
7496 X(ldr_sp,9800, f85d0000), \
7497 X(lsl, 0000, fa00f000), \
7498 X(lsls, 0000, fa10f000), \
7499 X(lsr, 0800, fa20f000), \
7500 X(lsrs, 0800, fa30f000), \
7501 X(mov, 2000, ea4f0000), \
7502 X(movs, 2000, ea5f0000), \
7503 X(mul, 4340, fb00f000), \
7504 X(muls, 4340, ffffffff), /* no 32b muls */ \
7505 X(mvn, 43c0, ea6f0000), \
7506 X(mvns, 43c0, ea7f0000), \
7507 X(neg, 4240, f1c00000), /* rsb #0 */ \
7508 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7509 X(orr, 4300, ea400000), \
7510 X(orrs, 4300, ea500000), \
7511 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7512 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7513 X(rev, ba00, fa90f080), \
7514 X(rev16, ba40, fa90f090), \
7515 X(revsh, bac0, fa90f0b0), \
7516 X(ror, 41c0, fa60f000), \
7517 X(rors, 41c0, fa70f000), \
7518 X(sbc, 4180, eb600000), \
7519 X(sbcs, 4180, eb700000), \
7520 X(stmia, c000, e8800000), \
7521 X(str, 6000, f8400000), \
7522 X(strb, 7000, f8000000), \
7523 X(strh, 8000, f8200000), \
7524 X(str_sp,9000, f84d0000), \
7525 X(sub, 1e00, eba00000), \
7526 X(subs, 1e00, ebb00000), \
7527 X(subi, 8000, f1a00000), \
7528 X(subis, 8000, f1b00000), \
7529 X(sxtb, b240, fa4ff080), \
7530 X(sxth, b200, fa0ff080), \
7531 X(tst, 4200, ea100f00), \
7532 X(uxtb, b2c0, fa5ff080), \
7533 X(uxth, b280, fa1ff080), \
7534 X(nop, bf00, f3af8000), \
7535 X(yield, bf10, f3af8001), \
7536 X(wfe, bf20, f3af8002), \
7537 X(wfi, bf30, f3af8003), \
7538 X(sev, bf40, f3af8004),
7539
7540 /* To catch errors in encoding functions, the codes are all offset by
7541 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7542 as 16-bit instructions. */
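/* For example, the first entry gives T_MNEM_adc == 0xF800, so
   THUMB_OP16 (T_MNEM_adc) is 0x4140 and THUMB_OP32 (T_MNEM_adc) is
   0xeb400000, using the macros defined below.  */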
7543 #define X(a,b,c) T_MNEM_##a
7544 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7545 #undef X
7546
7547 #define X(a,b,c) 0x##b
7548 static const unsigned short thumb_op16[] = { T16_32_TAB };
7549 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7550 #undef X
7551
7552 #define X(a,b,c) 0x##c
7553 static const unsigned int thumb_op32[] = { T16_32_TAB };
7554 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7555 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7556 #undef X
7557 #undef T16_32_TAB
7558
7559 /* Thumb instruction encoders, in alphabetical order. */
7560
7561 /* ADDW or SUBW. */
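/* E.g. "addw r0, r1, #4095": the immediate is a plain 12-bit value handled
   via BFD_RELOC_ARM_T32_IMM12 rather than a rotated constant (illustrative).  */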
7562 static void
7563 do_t_add_sub_w (void)
7564 {
7565 int Rd, Rn;
7566
7567 Rd = inst.operands[0].reg;
7568 Rn = inst.operands[1].reg;
7569
7570 constraint (Rd == 15, _("PC not allowed as destination"));
7571 inst.instruction |= (Rn << 16) | (Rd << 8);
7572 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7573 }
7574
7575 /* Parse an add or subtract instruction. We get here with inst.instruction
7576 equalling any of T_MNEM_add, adds, sub, or subs. */
7577
7578 static void
7579 do_t_add_sub (void)
7580 {
7581 int Rd, Rs, Rn;
7582
7583 Rd = inst.operands[0].reg;
7584 Rs = (inst.operands[1].present
7585 ? inst.operands[1].reg /* Rd, Rs, foo */
7586 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7587
7588 if (unified_syntax)
7589 {
7590 bfd_boolean flags;
7591 bfd_boolean narrow;
7592 int opcode;
7593
7594 flags = (inst.instruction == T_MNEM_adds
7595 || inst.instruction == T_MNEM_subs);
7596 if (flags)
7597 narrow = (current_it_mask == 0);
7598 else
7599 narrow = (current_it_mask != 0);
7600 if (!inst.operands[2].isreg)
7601 {
7602 opcode = 0;
7603 if (inst.size_req != 4)
7604 {
7605 int add;
7606
7607 add = (inst.instruction == T_MNEM_add
7608 || inst.instruction == T_MNEM_adds);
7609 /* Attempt to use a narrow opcode, with relaxation if
7610 appropriate. */
7611 if (Rd == REG_SP && Rs == REG_SP && !flags)
7612 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7613 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7614 opcode = T_MNEM_add_sp;
7615 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7616 opcode = T_MNEM_add_pc;
7617 else if (Rd <= 7 && Rs <= 7 && narrow)
7618 {
7619 if (flags)
7620 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7621 else
7622 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7623 }
7624 if (opcode)
7625 {
7626 inst.instruction = THUMB_OP16(opcode);
7627 inst.instruction |= (Rd << 4) | Rs;
7628 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7629 if (inst.size_req != 2)
7630 inst.relax = opcode;
7631 }
7632 else
7633 constraint (inst.size_req == 2, BAD_HIREG);
7634 }
7635 if (inst.size_req == 4
7636 || (inst.size_req != 2 && !opcode))
7637 {
7638 /* ??? Convert large immediates to addw/subw. */
7639 inst.instruction = THUMB_OP32 (inst.instruction);
7640 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7641 inst.instruction |= inst.operands[0].reg << 8;
7642 inst.instruction |= inst.operands[1].reg << 16;
7643 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7644 }
7645 }
7646 else
7647 {
7648 Rn = inst.operands[2].reg;
7649 /* See if we can do this with a 16-bit instruction. */
7650 if (!inst.operands[2].shifted && inst.size_req != 4)
7651 {
7652 if (Rd > 7 || Rs > 7 || Rn > 7)
7653 narrow = FALSE;
7654
7655 if (narrow)
7656 {
7657 inst.instruction = ((inst.instruction == T_MNEM_adds
7658 || inst.instruction == T_MNEM_add)
7659 ? T_OPCODE_ADD_R3
7660 : T_OPCODE_SUB_R3);
7661 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7662 return;
7663 }
7664
7665 if (inst.instruction == T_MNEM_add)
7666 {
7667 if (Rd == Rs)
7668 {
7669 inst.instruction = T_OPCODE_ADD_HI;
7670 inst.instruction |= (Rd & 8) << 4;
7671 inst.instruction |= (Rd & 7);
7672 inst.instruction |= Rn << 3;
7673 return;
7674 }
7675 /* ... because addition is commutative! */
7676 else if (Rd == Rn)
7677 {
7678 inst.instruction = T_OPCODE_ADD_HI;
7679 inst.instruction |= (Rd & 8) << 4;
7680 inst.instruction |= (Rd & 7);
7681 inst.instruction |= Rs << 3;
7682 return;
7683 }
7684 }
7685 }
7686 /* If we get here, it can't be done in 16 bits. */
7687 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7688 _("shift must be constant"));
7689 inst.instruction = THUMB_OP32 (inst.instruction);
7690 inst.instruction |= Rd << 8;
7691 inst.instruction |= Rs << 16;
7692 encode_thumb32_shifted_operand (2);
7693 }
7694 }
7695 else
7696 {
7697 constraint (inst.instruction == T_MNEM_adds
7698 || inst.instruction == T_MNEM_subs,
7699 BAD_THUMB32);
7700
7701 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7702 {
7703 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7704 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7705 BAD_HIREG);
7706
7707 inst.instruction = (inst.instruction == T_MNEM_add
7708 ? 0x0000 : 0x8000);
7709 inst.instruction |= (Rd << 4) | Rs;
7710 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7711 return;
7712 }
7713
7714 Rn = inst.operands[2].reg;
7715 constraint (inst.operands[2].shifted, _("unshifted register required"));
7716
7717 /* We now have Rd, Rs, and Rn set to registers. */
7718 if (Rd > 7 || Rs > 7 || Rn > 7)
7719 {
7720 /* Can't do this for SUB. */
7721 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7722 inst.instruction = T_OPCODE_ADD_HI;
7723 inst.instruction |= (Rd & 8) << 4;
7724 inst.instruction |= (Rd & 7);
7725 if (Rs == Rd)
7726 inst.instruction |= Rn << 3;
7727 else if (Rn == Rd)
7728 inst.instruction |= Rs << 3;
7729 else
7730 constraint (1, _("dest must overlap one source register"));
7731 }
7732 else
7733 {
7734 inst.instruction = (inst.instruction == T_MNEM_add
7735 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7736 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7737 }
7738 }
7739 }
7740
7741 static void
7742 do_t_adr (void)
7743 {
7744 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7745 {
7746 /* Defer to section relaxation. */
7747 inst.relax = inst.instruction;
7748 inst.instruction = THUMB_OP16 (inst.instruction);
7749 inst.instruction |= inst.operands[0].reg << 4;
7750 }
7751 else if (unified_syntax && inst.size_req != 2)
7752 {
7753 /* Generate a 32-bit opcode. */
7754 inst.instruction = THUMB_OP32 (inst.instruction);
7755 inst.instruction |= inst.operands[0].reg << 8;
7756 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7757 inst.reloc.pc_rel = 1;
7758 }
7759 else
7760 {
7761 /* Generate a 16-bit opcode. */
7762 inst.instruction = THUMB_OP16 (inst.instruction);
7763 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7764 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7765 inst.reloc.pc_rel = 1;
7766
7767 inst.instruction |= inst.operands[0].reg << 4;
7768 }
7769 }
7770
7771 /* Arithmetic instructions for which there is just one 16-bit
7772 instruction encoding, and it allows only two low registers.
7773 For maximal compatibility with ARM syntax, we allow three register
7774 operands even when Thumb-32 instructions are not available, as long
7775 as the first two are identical. For instance, both "sbc r0,r1" and
7776 "sbc r0,r0,r1" are allowed. */
7777 static void
7778 do_t_arit3 (void)
7779 {
7780 int Rd, Rs, Rn;
7781
7782 Rd = inst.operands[0].reg;
7783 Rs = (inst.operands[1].present
7784 ? inst.operands[1].reg /* Rd, Rs, foo */
7785 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7786 Rn = inst.operands[2].reg;
7787
7788 if (unified_syntax)
7789 {
7790 if (!inst.operands[2].isreg)
7791 {
7792 /* For an immediate, we always generate a 32-bit opcode;
7793 section relaxation will shrink it later if possible. */
7794 inst.instruction = THUMB_OP32 (inst.instruction);
7795 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7796 inst.instruction |= Rd << 8;
7797 inst.instruction |= Rs << 16;
7798 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7799 }
7800 else
7801 {
7802 bfd_boolean narrow;
7803
7804 /* See if we can do this with a 16-bit instruction. */
7805 if (THUMB_SETS_FLAGS (inst.instruction))
7806 narrow = current_it_mask == 0;
7807 else
7808 narrow = current_it_mask != 0;
7809
7810 if (Rd > 7 || Rn > 7 || Rs > 7)
7811 narrow = FALSE;
7812 if (inst.operands[2].shifted)
7813 narrow = FALSE;
7814 if (inst.size_req == 4)
7815 narrow = FALSE;
7816
7817 if (narrow
7818 && Rd == Rs)
7819 {
7820 inst.instruction = THUMB_OP16 (inst.instruction);
7821 inst.instruction |= Rd;
7822 inst.instruction |= Rn << 3;
7823 return;
7824 }
7825
7826 /* If we get here, it can't be done in 16 bits. */
7827 constraint (inst.operands[2].shifted
7828 && inst.operands[2].immisreg,
7829 _("shift must be constant"));
7830 inst.instruction = THUMB_OP32 (inst.instruction);
7831 inst.instruction |= Rd << 8;
7832 inst.instruction |= Rs << 16;
7833 encode_thumb32_shifted_operand (2);
7834 }
7835 }
7836 else
7837 {
7838 /* On its face this is a lie - the instruction does set the
7839 flags. However, the only supported mnemonic in this mode
7840 says it doesn't. */
7841 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7842
7843 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7844 _("unshifted register required"));
7845 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7846 constraint (Rd != Rs,
7847 _("dest and source1 must be the same register"));
7848
7849 inst.instruction = THUMB_OP16 (inst.instruction);
7850 inst.instruction |= Rd;
7851 inst.instruction |= Rn << 3;
7852 }
7853 }
7854
7855 /* Similarly, but for instructions where the arithmetic operation is
7856 commutative, so we can allow either of them to be different from
7857 the destination operand in a 16-bit instruction. For instance, all
7858 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7859 accepted. */
7860 static void
7861 do_t_arit3c (void)
7862 {
7863 int Rd, Rs, Rn;
7864
7865 Rd = inst.operands[0].reg;
7866 Rs = (inst.operands[1].present
7867 ? inst.operands[1].reg /* Rd, Rs, foo */
7868 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7869 Rn = inst.operands[2].reg;
7870
7871 if (unified_syntax)
7872 {
7873 if (!inst.operands[2].isreg)
7874 {
7875 /* For an immediate, we always generate a 32-bit opcode;
7876 section relaxation will shrink it later if possible. */
7877 inst.instruction = THUMB_OP32 (inst.instruction);
7878 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7879 inst.instruction |= Rd << 8;
7880 inst.instruction |= Rs << 16;
7881 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7882 }
7883 else
7884 {
7885 bfd_boolean narrow;
7886
7887 /* See if we can do this with a 16-bit instruction. */
7888 if (THUMB_SETS_FLAGS (inst.instruction))
7889 narrow = current_it_mask == 0;
7890 else
7891 narrow = current_it_mask != 0;
7892
7893 if (Rd > 7 || Rn > 7 || Rs > 7)
7894 narrow = FALSE;
7895 if (inst.operands[2].shifted)
7896 narrow = FALSE;
7897 if (inst.size_req == 4)
7898 narrow = FALSE;
7899
7900 if (narrow)
7901 {
7902 if (Rd == Rs)
7903 {
7904 inst.instruction = THUMB_OP16 (inst.instruction);
7905 inst.instruction |= Rd;
7906 inst.instruction |= Rn << 3;
7907 return;
7908 }
7909 if (Rd == Rn)
7910 {
7911 inst.instruction = THUMB_OP16 (inst.instruction);
7912 inst.instruction |= Rd;
7913 inst.instruction |= Rs << 3;
7914 return;
7915 }
7916 }
7917
7918 /* If we get here, it can't be done in 16 bits. */
7919 constraint (inst.operands[2].shifted
7920 && inst.operands[2].immisreg,
7921 _("shift must be constant"));
7922 inst.instruction = THUMB_OP32 (inst.instruction);
7923 inst.instruction |= Rd << 8;
7924 inst.instruction |= Rs << 16;
7925 encode_thumb32_shifted_operand (2);
7926 }
7927 }
7928 else
7929 {
7930 /* On its face this is a lie - the instruction does set the
7931 flags. However, the only supported mnemonic in this mode
7932 says it doesn't. */
7933 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7934
7935 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7936 _("unshifted register required"));
7937 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7938
7939 inst.instruction = THUMB_OP16 (inst.instruction);
7940 inst.instruction |= Rd;
7941
7942 if (Rd == Rs)
7943 inst.instruction |= Rn << 3;
7944 else if (Rd == Rn)
7945 inst.instruction |= Rs << 3;
7946 else
7947 constraint (1, _("dest must overlap one source register"));
7948 }
7949 }
7950
7951 static void
7952 do_t_barrier (void)
7953 {
7954 if (inst.operands[0].present)
7955 {
7956 constraint ((inst.instruction & 0xf0) != 0x40
7957 && inst.operands[0].imm != 0xf,
7958 _("bad barrier type"));
7959 inst.instruction |= inst.operands[0].imm;
7960 }
7961 else
7962 inst.instruction |= 0xf;
7963 }
7964
7965 static void
7966 do_t_bfc (void)
7967 {
7968 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7969 constraint (msb > 32, _("bit-field extends past end of register"));
7970 /* The instruction encoding stores the LSB and MSB,
7971 not the LSB and width. */
7972 inst.instruction |= inst.operands[0].reg << 8;
7973 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
7974 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
7975 inst.instruction |= msb - 1;
7976 }
7977
7978 static void
7979 do_t_bfi (void)
7980 {
7981 unsigned int msb;
7982
7983 /* #0 in second position is alternative syntax for bfc, which is
7984 the same instruction but with REG_PC in the Rm field. */
7985 if (!inst.operands[1].isreg)
7986 inst.operands[1].reg = REG_PC;
7987
7988 msb = inst.operands[2].imm + inst.operands[3].imm;
7989 constraint (msb > 32, _("bit-field extends past end of register"));
7990 /* The instruction encoding stores the LSB and MSB,
7991 not the LSB and width. */
7992 inst.instruction |= inst.operands[0].reg << 8;
7993 inst.instruction |= inst.operands[1].reg << 16;
7994 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7995 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7996 inst.instruction |= msb - 1;
7997 }
7998
7999 static void
8000 do_t_bfx (void)
8001 {
8002 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8003 _("bit-field extends past end of register"));
8004 inst.instruction |= inst.operands[0].reg << 8;
8005 inst.instruction |= inst.operands[1].reg << 16;
8006 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8007 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8008 inst.instruction |= inst.operands[3].imm - 1;
8009 }
8010
8011 /* ARM V5 Thumb BLX (argument parse)
8012 BLX <target_addr> which is BLX(1)
8013 BLX <Rm> which is BLX(2)
8014 Unfortunately, there are two different opcodes for this mnemonic.
8015 So, the insns[].value is not used, and the code here zaps values
8016 into inst.instruction.
8017
8018 ??? How to take advantage of the additional two bits of displacement
8019 available in Thumb32 mode? Need new relocation? */
8020
8021 static void
8022 do_t_blx (void)
8023 {
8024 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8025 if (inst.operands[0].isreg)
8026 /* We have a register, so this is BLX(2). */
8027 inst.instruction |= inst.operands[0].reg << 3;
8028 else
8029 {
8030 /* No register. This must be BLX(1). */
8031 inst.instruction = 0xf000e800;
8032 #ifdef OBJ_ELF
8033 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8034 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8035 else
8036 #endif
8037 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8038 inst.reloc.pc_rel = 1;
8039 }
8040 }
8041
8042 static void
8043 do_t_branch (void)
8044 {
8045 int opcode;
8046 int cond;
8047
8048 if (current_it_mask)
8049 {
8050 /* Conditional branches inside IT blocks are encoded as unconditional
8051 branches. */
8052 cond = COND_ALWAYS;
8053 /* A branch must be the last instruction in an IT block. */
8054 constraint (current_it_mask != 0x10, BAD_BRANCH);
8055 }
8056 else
8057 cond = inst.cond;
8058
8059 if (cond != COND_ALWAYS)
8060 opcode = T_MNEM_bcond;
8061 else
8062 opcode = inst.instruction;
8063
8064 if (unified_syntax && inst.size_req == 4)
8065 {
8066 inst.instruction = THUMB_OP32(opcode);
8067 if (cond == COND_ALWAYS)
8068 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8069 else
8070 {
8071 assert (cond != 0xF);
8072 inst.instruction |= cond << 22;
8073 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8074 }
8075 }
8076 else
8077 {
8078 inst.instruction = THUMB_OP16(opcode);
8079 if (cond == COND_ALWAYS)
8080 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8081 else
8082 {
8083 inst.instruction |= cond << 8;
8084 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8085 }
8086 /* Allow section relaxation. */
8087 if (unified_syntax && inst.size_req != 2)
8088 inst.relax = opcode;
8089 }
8090
8091 inst.reloc.pc_rel = 1;
8092 }
8093
8094 static void
8095 do_t_bkpt (void)
8096 {
8097 constraint (inst.cond != COND_ALWAYS,
8098 _("instruction is always unconditional"));
8099 if (inst.operands[0].present)
8100 {
8101 constraint (inst.operands[0].imm > 255,
8102 _("immediate value out of range"));
8103 inst.instruction |= inst.operands[0].imm;
8104 }
8105 }
8106
8107 static void
8108 do_t_branch23 (void)
8109 {
8110 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8111 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8112 inst.reloc.pc_rel = 1;
8113
8114 /* If the destination of the branch is a defined symbol which does not have
8115 the THUMB_FUNC attribute, then we must be calling a function which has
8116 the (interfacearm) attribute. We look for the Thumb entry point to that
8117 function and change the branch to refer to that function instead. */
8118 if ( inst.reloc.exp.X_op == O_symbol
8119 && inst.reloc.exp.X_add_symbol != NULL
8120 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8121 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8122 inst.reloc.exp.X_add_symbol =
8123 find_real_start (inst.reloc.exp.X_add_symbol);
8124 }
8125
8126 static void
8127 do_t_bx (void)
8128 {
8129 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8130 inst.instruction |= inst.operands[0].reg << 3;
8131 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8132 should cause the alignment to be checked once it is known. This is
8133 because BX PC only works if the instruction is word aligned. */
8134 }
8135
8136 static void
8137 do_t_bxj (void)
8138 {
8139 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8140 if (inst.operands[0].reg == REG_PC)
8141 as_tsktsk (_("use of r15 in bxj is not really useful"));
8142
8143 inst.instruction |= inst.operands[0].reg << 16;
8144 }
8145
8146 static void
8147 do_t_clz (void)
8148 {
8149 inst.instruction |= inst.operands[0].reg << 8;
8150 inst.instruction |= inst.operands[1].reg << 16;
8151 inst.instruction |= inst.operands[1].reg;
8152 }
8153
8154 static void
8155 do_t_cps (void)
8156 {
8157 constraint (current_it_mask, BAD_NOT_IT);
8158 inst.instruction |= inst.operands[0].imm;
8159 }
8160
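/* CPSIE/CPSID.  The 32-bit Thumb-2 encoding (based on 0xf3af8000) is
   used when a mode operand is given or .w is requested; otherwise the
   16-bit encoding is kept, which only supports the interrupt-flag form,
   e.g. plain "cpsie i" stays 16-bit.  */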
8161 static void
8162 do_t_cpsi (void)
8163 {
8164 constraint (current_it_mask, BAD_NOT_IT);
8165 if (unified_syntax
8166 && (inst.operands[1].present || inst.size_req == 4)
8167 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8168 {
8169 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8170 inst.instruction = 0xf3af8000;
8171 inst.instruction |= imod << 9;
8172 inst.instruction |= inst.operands[0].imm << 5;
8173 if (inst.operands[1].present)
8174 inst.instruction |= 0x100 | inst.operands[1].imm;
8175 }
8176 else
8177 {
8178 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8179 && (inst.operands[0].imm & 4),
8180 _("selected processor does not support 'A' form "
8181 "of this instruction"));
8182 constraint (inst.operands[1].present || inst.size_req == 4,
8183 _("Thumb does not support the 2-argument "
8184 "form of this instruction"));
8185 inst.instruction |= inst.operands[0].imm;
8186 }
8187 }
8188
8189 /* THUMB CPY instruction (argument parse). */
8190
8191 static void
8192 do_t_cpy (void)
8193 {
8194 if (inst.size_req == 4)
8195 {
8196 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8197 inst.instruction |= inst.operands[0].reg << 8;
8198 inst.instruction |= inst.operands[1].reg;
8199 }
8200 else
8201 {
8202 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8203 inst.instruction |= (inst.operands[0].reg & 0x7);
8204 inst.instruction |= inst.operands[1].reg << 3;
8205 }
8206 }
8207
8208 static void
8209 do_t_czb (void)
8210 {
8211 constraint (current_it_mask, BAD_NOT_IT);
8212 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8213 inst.instruction |= inst.operands[0].reg;
8214 inst.reloc.pc_rel = 1;
8215 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8216 }
8217
8218 static void
8219 do_t_dbg (void)
8220 {
8221 inst.instruction |= inst.operands[0].imm;
8222 }
8223
8224 static void
8225 do_t_div (void)
8226 {
8227 if (!inst.operands[1].present)
8228 inst.operands[1].reg = inst.operands[0].reg;
8229 inst.instruction |= inst.operands[0].reg << 8;
8230 inst.instruction |= inst.operands[1].reg << 16;
8231 inst.instruction |= inst.operands[2].reg;
8232 }
8233
8234 static void
8235 do_t_hint (void)
8236 {
8237 if (unified_syntax && inst.size_req == 4)
8238 inst.instruction = THUMB_OP32 (inst.instruction);
8239 else
8240 inst.instruction = THUMB_OP16 (inst.instruction);
8241 }
8242
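/* IT (If-Then).  Saves the block state in current_it_mask/current_cc for
   the IT-block checks elsewhere in this file.  In the encoding, each
   then/else bit of the mask equals bit 0 of the first condition for 'T'
   and its complement for 'E'; the mask as parsed assumes that bit is
   set, so it is flipped (above the terminating 1) when the condition has
   bit 0 clear.  For example, "ite ne" keeps mask 0b0100 (0xbf14) while
   "ite eq" is flipped to mask 0b1100 (0xbf0c).  */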
8243 static void
8244 do_t_it (void)
8245 {
8246 unsigned int cond = inst.operands[0].imm;
8247
8248 constraint (current_it_mask, BAD_NOT_IT);
8249 current_it_mask = (inst.instruction & 0xf) | 0x10;
8250 current_cc = cond;
8251
8252 /* If the condition is a negative condition, invert the mask. */
8253 if ((cond & 0x1) == 0x0)
8254 {
8255 unsigned int mask = inst.instruction & 0x000f;
8256
8257 if ((mask & 0x7) == 0)
8258 /* no conversion needed */;
8259 else if ((mask & 0x3) == 0)
8260 mask ^= 0x8;
8261 else if ((mask & 0x1) == 0)
8262 mask ^= 0xC;
8263 else
8264 mask ^= 0xE;
8265
8266 inst.instruction &= 0xfff0;
8267 inst.instruction |= mask;
8268 }
8269
8270 inst.instruction |= cond << 4;
8271 }
8272
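/* LDM/STM.  A rough sketch of the 16-bit/32-bit selection in unified
   syntax:

     stmia r0!, {r1-r3}   @ 16-bit (low base and registers, writeback)
     ldmia r0, {r1-r3}    @ base not in list and no writeback: 32-bit
     ldmdb r0, {r1, r2}   @ no 16-bit LDMDB/STMDB, always 32-bit  */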
8273 static void
8274 do_t_ldmstm (void)
8275 {
8276 /* This really doesn't seem worth it. */
8277 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8278 _("expression too complex"));
8279 constraint (inst.operands[1].writeback,
8280 _("Thumb load/store multiple does not support {reglist}^"));
8281
8282 if (unified_syntax)
8283 {
8284 /* See if we can use a 16-bit instruction. */
8285 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8286 && inst.size_req != 4
8287 && inst.operands[0].reg <= 7
8288 && !(inst.operands[1].imm & ~0xff)
8289 && (inst.instruction == T_MNEM_stmia
8290 ? inst.operands[0].writeback
8291 : (inst.operands[0].writeback
8292 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8293 {
8294 if (inst.instruction == T_MNEM_stmia
8295 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8296 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8297 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8298 inst.operands[0].reg);
8299
8300 inst.instruction = THUMB_OP16 (inst.instruction);
8301 inst.instruction |= inst.operands[0].reg << 8;
8302 inst.instruction |= inst.operands[1].imm;
8303 }
8304 else
8305 {
8306 if (inst.operands[1].imm & (1 << 13))
8307 as_warn (_("SP should not be in register list"));
8308 if (inst.instruction == T_MNEM_stmia)
8309 {
8310 if (inst.operands[1].imm & (1 << 15))
8311 as_warn (_("PC should not be in register list"));
8312 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8313 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8314 inst.operands[0].reg);
8315 }
8316 else
8317 {
8318 if (inst.operands[1].imm & (1 << 14)
8319 && inst.operands[1].imm & (1 << 15))
8320 as_warn (_("LR and PC should not both be in register list"));
8321 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8322 && inst.operands[0].writeback)
8323 as_warn (_("base register should not be in register list "
8324 "when written back"));
8325 }
8326 if (inst.instruction < 0xffff)
8327 inst.instruction = THUMB_OP32 (inst.instruction);
8328 inst.instruction |= inst.operands[0].reg << 16;
8329 inst.instruction |= inst.operands[1].imm;
8330 if (inst.operands[0].writeback)
8331 inst.instruction |= WRITE_BACK;
8332 }
8333 }
8334 else
8335 {
8336 constraint (inst.operands[0].reg > 7
8337 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8338 if (inst.instruction == T_MNEM_stmia)
8339 {
8340 if (!inst.operands[0].writeback)
8341 as_warn (_("this instruction will write back the base register"));
8342 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8343 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8344 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8345 inst.operands[0].reg);
8346 }
8347 else
8348 {
8349 if (!inst.operands[0].writeback
8350 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8351 as_warn (_("this instruction will write back the base register"));
8352 else if (inst.operands[0].writeback
8353 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8354 as_warn (_("this instruction will not write back the base register"));
8355 }
8356
8357 inst.instruction = THUMB_OP16 (inst.instruction);
8358 inst.instruction |= inst.operands[0].reg << 8;
8359 inst.instruction |= inst.operands[1].imm;
8360 }
8361 }
8362
8363 static void
8364 do_t_ldrex (void)
8365 {
8366 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8367 || inst.operands[1].postind || inst.operands[1].writeback
8368 || inst.operands[1].immisreg || inst.operands[1].shifted
8369 || inst.operands[1].negative,
8370 BAD_ADDR_MODE);
8371
8372 inst.instruction |= inst.operands[0].reg << 12;
8373 inst.instruction |= inst.operands[1].reg << 16;
8374 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8375 }
8376
8377 static void
8378 do_t_ldrexd (void)
8379 {
8380 if (!inst.operands[1].present)
8381 {
8382 constraint (inst.operands[0].reg == REG_LR,
8383 _("r14 not allowed as first register "
8384 "when second register is omitted"));
8385 inst.operands[1].reg = inst.operands[0].reg + 1;
8386 }
8387 constraint (inst.operands[0].reg == inst.operands[1].reg,
8388 BAD_OVERLAP);
8389
8390 inst.instruction |= inst.operands[0].reg << 12;
8391 inst.instruction |= inst.operands[1].reg << 8;
8392 inst.instruction |= inst.operands[2].reg << 16;
8393 }
8394
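/* Single-register load/store.  In unified syntax a 16-bit encoding is
   used where one exists; otherwise the Thumb-2 form is emitted.  Some
   illustrative cases:

     ldr  r1, [r2, #4]     @ 16-bit immediate form
     ldr  r1, [sp, #8]     @ 16-bit SP-relative form
     ldr  r1, =0x12345678  @ no register operand: may become MOV or a
                           @ literal-pool load (move_or_literal_pool)
     str  r8, [r0, #4]     @ high register: 32-bit encoding  */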
8395 static void
8396 do_t_ldst (void)
8397 {
8398 unsigned long opcode;
8399 int Rn;
8400
8401 opcode = inst.instruction;
8402 if (unified_syntax)
8403 {
8404 if (!inst.operands[1].isreg)
8405 {
8406 if (opcode <= 0xffff)
8407 inst.instruction = THUMB_OP32 (opcode);
8408 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8409 return;
8410 }
8411 if (inst.operands[1].isreg
8412 && !inst.operands[1].writeback
8413 && !inst.operands[1].shifted && !inst.operands[1].postind
8414 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8415 && opcode <= 0xffff
8416 && inst.size_req != 4)
8417 {
8418 /* Insn may have a 16-bit form. */
8419 Rn = inst.operands[1].reg;
8420 if (inst.operands[1].immisreg)
8421 {
8422 inst.instruction = THUMB_OP16 (opcode);
8423 /* [Rn, Ri] */
8424 if (Rn <= 7 && inst.operands[1].imm <= 7)
8425 goto op16;
8426 }
8427 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8428 && opcode != T_MNEM_ldrsb)
8429 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8430 || (Rn == REG_SP && opcode == T_MNEM_str))
8431 {
8432 /* [Rn, #const] */
8433 if (Rn > 7)
8434 {
8435 if (Rn == REG_PC)
8436 {
8437 if (inst.reloc.pc_rel)
8438 opcode = T_MNEM_ldr_pc2;
8439 else
8440 opcode = T_MNEM_ldr_pc;
8441 }
8442 else
8443 {
8444 if (opcode == T_MNEM_ldr)
8445 opcode = T_MNEM_ldr_sp;
8446 else
8447 opcode = T_MNEM_str_sp;
8448 }
8449 inst.instruction = inst.operands[0].reg << 8;
8450 }
8451 else
8452 {
8453 inst.instruction = inst.operands[0].reg;
8454 inst.instruction |= inst.operands[1].reg << 3;
8455 }
8456 inst.instruction |= THUMB_OP16 (opcode);
8457 if (inst.size_req == 2)
8458 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8459 else
8460 inst.relax = opcode;
8461 return;
8462 }
8463 }
8464 /* Definitely a 32-bit variant. */
8465 inst.instruction = THUMB_OP32 (opcode);
8466 inst.instruction |= inst.operands[0].reg << 12;
8467 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8468 return;
8469 }
8470
8471 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8472
8473 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8474 {
8475 /* Only [Rn,Rm] is acceptable. */
8476 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8477 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8478 || inst.operands[1].postind || inst.operands[1].shifted
8479 || inst.operands[1].negative,
8480 _("Thumb does not support this addressing mode"));
8481 inst.instruction = THUMB_OP16 (inst.instruction);
8482 goto op16;
8483 }
8484
8485 inst.instruction = THUMB_OP16 (inst.instruction);
8486 if (!inst.operands[1].isreg)
8487 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8488 return;
8489
8490 constraint (!inst.operands[1].preind
8491 || inst.operands[1].shifted
8492 || inst.operands[1].writeback,
8493 _("Thumb does not support this addressing mode"));
8494 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8495 {
8496 constraint (inst.instruction & 0x0600,
8497 _("byte or halfword not valid for base register"));
8498 constraint (inst.operands[1].reg == REG_PC
8499 && !(inst.instruction & THUMB_LOAD_BIT),
8500 _("r15 based store not allowed"));
8501 constraint (inst.operands[1].immisreg,
8502 _("invalid base register for register offset"));
8503
8504 if (inst.operands[1].reg == REG_PC)
8505 inst.instruction = T_OPCODE_LDR_PC;
8506 else if (inst.instruction & THUMB_LOAD_BIT)
8507 inst.instruction = T_OPCODE_LDR_SP;
8508 else
8509 inst.instruction = T_OPCODE_STR_SP;
8510
8511 inst.instruction |= inst.operands[0].reg << 8;
8512 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8513 return;
8514 }
8515
8516 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8517 if (!inst.operands[1].immisreg)
8518 {
8519 /* Immediate offset. */
8520 inst.instruction |= inst.operands[0].reg;
8521 inst.instruction |= inst.operands[1].reg << 3;
8522 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8523 return;
8524 }
8525
8526 /* Register offset. */
8527 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8528 constraint (inst.operands[1].negative,
8529 _("Thumb does not support this addressing mode"));
8530
8531 op16:
8532 switch (inst.instruction)
8533 {
8534 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8535 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8536 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8537 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8538 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8539 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8540 case 0x5600 /* ldrsb */:
8541 case 0x5e00 /* ldrsh */: break;
8542 default: abort ();
8543 }
8544
8545 inst.instruction |= inst.operands[0].reg;
8546 inst.instruction |= inst.operands[1].reg << 3;
8547 inst.instruction |= inst.operands[1].imm << 6;
8548 }
8549
8550 static void
8551 do_t_ldstd (void)
8552 {
8553 if (!inst.operands[1].present)
8554 {
8555 inst.operands[1].reg = inst.operands[0].reg + 1;
8556 constraint (inst.operands[0].reg == REG_LR,
8557 _("r14 not allowed here"));
8558 }
8559 inst.instruction |= inst.operands[0].reg << 12;
8560 inst.instruction |= inst.operands[1].reg << 8;
8561 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8563 }
8564
8565 static void
8566 do_t_ldstt (void)
8567 {
8568 inst.instruction |= inst.operands[0].reg << 12;
8569 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8570 }
8571
8572 static void
8573 do_t_mla (void)
8574 {
8575 inst.instruction |= inst.operands[0].reg << 8;
8576 inst.instruction |= inst.operands[1].reg << 16;
8577 inst.instruction |= inst.operands[2].reg;
8578 inst.instruction |= inst.operands[3].reg << 12;
8579 }
8580
8581 static void
8582 do_t_mlal (void)
8583 {
8584 inst.instruction |= inst.operands[0].reg << 12;
8585 inst.instruction |= inst.operands[1].reg << 8;
8586 inst.instruction |= inst.operands[2].reg << 16;
8587 inst.instruction |= inst.operands[3].reg;
8588 }
8589
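/* MOV, MOVS and CMP.  In unified syntax the 16-bit encodings are used
   where they give the requested behaviour, for example:

     mov  r0, r1      @ 16-bit high-register MOV, flags unchanged
     movs r0, r1      @ low registers only; emitted as ADDS r0, r1, #0
     cmp  r0, r8      @ high register involved: 16-bit high-register CMP
     movs r0, r8      @ no flag-setting 16-bit form: 32-bit MOVS.W  */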
8590 static void
8591 do_t_mov_cmp (void)
8592 {
8593 if (unified_syntax)
8594 {
8595 int r0off = (inst.instruction == T_MNEM_mov
8596 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8597 unsigned long opcode;
8598 bfd_boolean narrow;
8599 bfd_boolean low_regs;
8600
8601 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8602 opcode = inst.instruction;
8603 if (current_it_mask)
8604 narrow = opcode != T_MNEM_movs;
8605 else
8606 narrow = opcode != T_MNEM_movs || low_regs;
8607 if (inst.size_req == 4
8608 || inst.operands[1].shifted)
8609 narrow = FALSE;
8610
8611 if (!inst.operands[1].isreg)
8612 {
8613 /* Immediate operand. */
8614 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8615 narrow = 0;
8616 if (low_regs && narrow)
8617 {
8618 inst.instruction = THUMB_OP16 (opcode);
8619 inst.instruction |= inst.operands[0].reg << 8;
8620 if (inst.size_req == 2)
8621 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8622 else
8623 inst.relax = opcode;
8624 }
8625 else
8626 {
8627 inst.instruction = THUMB_OP32 (inst.instruction);
8628 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8629 inst.instruction |= inst.operands[0].reg << r0off;
8630 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8631 }
8632 }
8633 else if (!narrow)
8634 {
8635 inst.instruction = THUMB_OP32 (inst.instruction);
8636 inst.instruction |= inst.operands[0].reg << r0off;
8637 encode_thumb32_shifted_operand (1);
8638 }
8639 else
8640 switch (inst.instruction)
8641 {
8642 case T_MNEM_mov:
8643 inst.instruction = T_OPCODE_MOV_HR;
8644 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8645 inst.instruction |= (inst.operands[0].reg & 0x7);
8646 inst.instruction |= inst.operands[1].reg << 3;
8647 break;
8648
8649 case T_MNEM_movs:
8650 /* We know we have low registers at this point.
8651 Generate ADD Rd, Rs, #0. */
8652 inst.instruction = T_OPCODE_ADD_I3;
8653 inst.instruction |= inst.operands[0].reg;
8654 inst.instruction |= inst.operands[1].reg << 3;
8655 break;
8656
8657 case T_MNEM_cmp:
8658 if (low_regs)
8659 {
8660 inst.instruction = T_OPCODE_CMP_LR;
8661 inst.instruction |= inst.operands[0].reg;
8662 inst.instruction |= inst.operands[1].reg << 3;
8663 }
8664 else
8665 {
8666 inst.instruction = T_OPCODE_CMP_HR;
8667 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8668 inst.instruction |= (inst.operands[0].reg & 0x7);
8669 inst.instruction |= inst.operands[1].reg << 3;
8670 }
8671 break;
8672 }
8673 return;
8674 }
8675
8676 inst.instruction = THUMB_OP16 (inst.instruction);
8677 if (inst.operands[1].isreg)
8678 {
8679 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8680 {
8681 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8682 since a MOV instruction produces unpredictable results. */
8683 if (inst.instruction == T_OPCODE_MOV_I8)
8684 inst.instruction = T_OPCODE_ADD_I3;
8685 else
8686 inst.instruction = T_OPCODE_CMP_LR;
8687
8688 inst.instruction |= inst.operands[0].reg;
8689 inst.instruction |= inst.operands[1].reg << 3;
8690 }
8691 else
8692 {
8693 if (inst.instruction == T_OPCODE_MOV_I8)
8694 inst.instruction = T_OPCODE_MOV_HR;
8695 else
8696 inst.instruction = T_OPCODE_CMP_HR;
8697 do_t_cpy ();
8698 }
8699 }
8700 else
8701 {
8702 constraint (inst.operands[0].reg > 7,
8703 _("only lo regs allowed with immediate"));
8704 inst.instruction |= inst.operands[0].reg << 8;
8705 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8706 }
8707 }
8708
8709 static void
8710 do_t_mov16 (void)
8711 {
8712 inst.instruction |= inst.operands[0].reg << 8;
8713 inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
8714 inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
8715 inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
8716 inst.instruction |= (inst.operands[1].imm & 0x00ff);
8717 }
8718
8719 static void
8720 do_t_mvn_tst (void)
8721 {
8722 if (unified_syntax)
8723 {
8724 int r0off = (inst.instruction == T_MNEM_mvn
8725 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8726 bfd_boolean narrow;
8727
8728 if (inst.size_req == 4
8729 || inst.instruction > 0xffff
8730 || inst.operands[1].shifted
8731 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8732 narrow = FALSE;
8733 else if (inst.instruction == T_MNEM_cmn)
8734 narrow = TRUE;
8735 else if (THUMB_SETS_FLAGS (inst.instruction))
8736 narrow = (current_it_mask == 0);
8737 else
8738 narrow = (current_it_mask != 0);
8739
8740 if (!inst.operands[1].isreg)
8741 {
8742 /* For an immediate, we always generate a 32-bit opcode;
8743 section relaxation will shrink it later if possible. */
8744 if (inst.instruction < 0xffff)
8745 inst.instruction = THUMB_OP32 (inst.instruction);
8746 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8747 inst.instruction |= inst.operands[0].reg << r0off;
8748 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8749 }
8750 else
8751 {
8752 /* See if we can do this with a 16-bit instruction. */
8753 if (narrow)
8754 {
8755 inst.instruction = THUMB_OP16 (inst.instruction);
8756 inst.instruction |= inst.operands[0].reg;
8757 inst.instruction |= inst.operands[1].reg << 3;
8758 }
8759 else
8760 {
8761 constraint (inst.operands[1].shifted
8762 && inst.operands[1].immisreg,
8763 _("shift must be constant"));
8764 if (inst.instruction < 0xffff)
8765 inst.instruction = THUMB_OP32 (inst.instruction);
8766 inst.instruction |= inst.operands[0].reg << r0off;
8767 encode_thumb32_shifted_operand (1);
8768 }
8769 }
8770 }
8771 else
8772 {
8773 constraint (inst.instruction > 0xffff
8774 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8775 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8776 _("unshifted register required"));
8777 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8778 BAD_HIREG);
8779
8780 inst.instruction = THUMB_OP16 (inst.instruction);
8781 inst.instruction |= inst.operands[0].reg;
8782 inst.instruction |= inst.operands[1].reg << 3;
8783 }
8784 }
8785
8786 static void
8787 do_t_mrs (void)
8788 {
8789 int flags;
8790 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8791 if (flags == 0)
8792 {
8793 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8794 _("selected processor does not support "
8795 "requested special purpose register"));
8796 }
8797 else
8798 {
8799 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8800 _("selected processor does not support "
8801 "requested special purpose register"));
8802 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8803 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8804 _("'CPSR' or 'SPSR' expected"));
8805 }
8806
8807 inst.instruction |= inst.operands[0].reg << 8;
8808 inst.instruction |= (flags & SPSR_BIT) >> 2;
8809 inst.instruction |= inst.operands[1].imm & 0xff;
8810 }
8811
8812 static void
8813 do_t_msr (void)
8814 {
8815 int flags;
8816
8817 constraint (!inst.operands[1].isreg,
8818 _("Thumb encoding does not support an immediate here"));
8819 flags = inst.operands[0].imm;
8820 if (flags & ~0xff)
8821 {
8822 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8823 _("selected processor does not support "
8824 "requested special purpose register"));
8825 }
8826 else
8827 {
8828 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8829 _("selected processor does not support "
8830 "requested special purpose register"));
8831 flags |= PSR_f;
8832 }
8833 inst.instruction |= (flags & SPSR_BIT) >> 2;
8834 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8835 inst.instruction |= (flags & 0xff);
8836 inst.instruction |= inst.operands[1].reg << 16;
8837 }
8838
8839 static void
8840 do_t_mul (void)
8841 {
8842 if (!inst.operands[2].present)
8843 inst.operands[2].reg = inst.operands[0].reg;
8844
8845 /* There is no 32-bit MULS and no 16-bit MUL. */
8846 if (unified_syntax && inst.instruction == T_MNEM_mul)
8847 {
8848 inst.instruction = THUMB_OP32 (inst.instruction);
8849 inst.instruction |= inst.operands[0].reg << 8;
8850 inst.instruction |= inst.operands[1].reg << 16;
8851 inst.instruction |= inst.operands[2].reg << 0;
8852 }
8853 else
8854 {
8855 constraint (!unified_syntax
8856 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8857 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8858 BAD_HIREG);
8859
8860 inst.instruction = THUMB_OP16 (inst.instruction);
8861 inst.instruction |= inst.operands[0].reg;
8862
8863 if (inst.operands[0].reg == inst.operands[1].reg)
8864 inst.instruction |= inst.operands[2].reg << 3;
8865 else if (inst.operands[0].reg == inst.operands[2].reg)
8866 inst.instruction |= inst.operands[1].reg << 3;
8867 else
8868 constraint (1, _("dest must overlap one source register"));
8869 }
8870 }
8871
8872 static void
8873 do_t_mull (void)
8874 {
8875 inst.instruction |= inst.operands[0].reg << 12;
8876 inst.instruction |= inst.operands[1].reg << 8;
8877 inst.instruction |= inst.operands[2].reg << 16;
8878 inst.instruction |= inst.operands[3].reg;
8879
8880 if (inst.operands[0].reg == inst.operands[1].reg)
8881 as_tsktsk (_("rdhi and rdlo must be different"));
8882 }
8883
8884 static void
8885 do_t_nop (void)
8886 {
8887 if (unified_syntax)
8888 {
8889 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8890 {
8891 inst.instruction = THUMB_OP32 (inst.instruction);
8892 inst.instruction |= inst.operands[0].imm;
8893 }
8894 else
8895 {
8896 inst.instruction = THUMB_OP16 (inst.instruction);
8897 inst.instruction |= inst.operands[0].imm << 4;
8898 }
8899 }
8900 else
8901 {
8902 constraint (inst.operands[0].present,
8903 _("Thumb does not support NOP with hints"));
8904 inst.instruction = 0x46c0;
8905 }
8906 }
8907
8908 static void
8909 do_t_neg (void)
8910 {
8911 if (unified_syntax)
8912 {
8913 bfd_boolean narrow;
8914
8915 if (THUMB_SETS_FLAGS (inst.instruction))
8916 narrow = (current_it_mask == 0);
8917 else
8918 narrow = (current_it_mask != 0);
8919 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8920 narrow = FALSE;
8921 if (inst.size_req == 4)
8922 narrow = FALSE;
8923
8924 if (!narrow)
8925 {
8926 inst.instruction = THUMB_OP32 (inst.instruction);
8927 inst.instruction |= inst.operands[0].reg << 8;
8928 inst.instruction |= inst.operands[1].reg << 16;
8929 }
8930 else
8931 {
8932 inst.instruction = THUMB_OP16 (inst.instruction);
8933 inst.instruction |= inst.operands[0].reg;
8934 inst.instruction |= inst.operands[1].reg << 3;
8935 }
8936 }
8937 else
8938 {
8939 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8940 BAD_HIREG);
8941 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8942
8943 inst.instruction = THUMB_OP16 (inst.instruction);
8944 inst.instruction |= inst.operands[0].reg;
8945 inst.instruction |= inst.operands[1].reg << 3;
8946 }
8947 }
8948
8949 static void
8950 do_t_pkhbt (void)
8951 {
8952 inst.instruction |= inst.operands[0].reg << 8;
8953 inst.instruction |= inst.operands[1].reg << 16;
8954 inst.instruction |= inst.operands[2].reg;
8955 if (inst.operands[3].present)
8956 {
8957 unsigned int val = inst.reloc.exp.X_add_number;
8958 constraint (inst.reloc.exp.X_op != O_constant,
8959 _("expression too complex"));
8960 inst.instruction |= (val & 0x1c) << 10;
8961 inst.instruction |= (val & 0x03) << 6;
8962 }
8963 }
8964
8965 static void
8966 do_t_pkhtb (void)
8967 {
8968 if (!inst.operands[3].present)
8969 inst.instruction &= ~0x00000020;
8970 do_t_pkhbt ();
8971 }
8972
8973 static void
8974 do_t_pld (void)
8975 {
8976 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
8977 }
8978
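/* PUSH and POP.  The register list decides the encoding; a rough sketch:

     push {r0-r3}       @ 16-bit
     push {r4-r6, lr}   @ 16-bit, LR via the PC/LR bit
     pop  {r0-r7, pc}   @ 16-bit, PC via the PC/LR bit
     push {r0, r8}      @ 32-bit (unified syntax only)
     push {r8}          @ single register: STR r8, [sp, #-4]!
     pop  {r8}          @ single register: LDR r8, [sp], #4  */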
8979 static void
8980 do_t_push_pop (void)
8981 {
8982 unsigned mask;
8983
8984 constraint (inst.operands[0].writeback,
8985 _("push/pop do not support {reglist}^"));
8986 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8987 _("expression too complex"));
8988
8989 mask = inst.operands[0].imm;
8990 if ((mask & ~0xff) == 0)
8991 inst.instruction = THUMB_OP16 (inst.instruction);
8992 else if ((inst.instruction == T_MNEM_push
8993 && (mask & ~0xff) == 1 << REG_LR)
8994 || (inst.instruction == T_MNEM_pop
8995 && (mask & ~0xff) == 1 << REG_PC))
8996 {
8997 inst.instruction = THUMB_OP16 (inst.instruction);
8998 inst.instruction |= THUMB_PP_PC_LR;
8999 mask &= 0xff;
9000 }
9001 else if (unified_syntax)
9002 {
9003 if (mask & (1 << 13))
9004 inst.error = _("SP not allowed in register list");
9005 if (inst.instruction == T_MNEM_push)
9006 {
9007 if (mask & (1 << 15))
9008 inst.error = _("PC not allowed in register list");
9009 }
9010 else
9011 {
9012 if (mask & (1 << 14)
9013 && mask & (1 << 15))
9014 inst.error = _("LR and PC should not both be in register list");
9015 }
9016 if ((mask & (mask - 1)) == 0)
9017 {
9018 /* Single register push/pop implemented as str/ldr. */
9019 if (inst.instruction == T_MNEM_push)
9020 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9021 else
9022 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9023 mask = ffs (mask) - 1;
9024 mask <<= 12;
9025 }
9026 else
9027 inst.instruction = THUMB_OP32 (inst.instruction);
9028 }
9029 else
9030 {
9031 inst.error = _("invalid register list to push/pop instruction");
9032 return;
9033 }
9034
9035 inst.instruction |= mask;
9036 }
9037
9038 static void
9039 do_t_rbit (void)
9040 {
9041 inst.instruction |= inst.operands[0].reg << 8;
9042 inst.instruction |= inst.operands[1].reg << 16;
/* The T32 encoding repeats Rm in bits <3:0> (cf. do_t_clz above). */
inst.instruction |= inst.operands[1].reg;
9043 }
9044
9045 static void
9046 do_t_rev (void)
9047 {
9048 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9049 && inst.size_req != 4)
9050 {
9051 inst.instruction = THUMB_OP16 (inst.instruction);
9052 inst.instruction |= inst.operands[0].reg;
9053 inst.instruction |= inst.operands[1].reg << 3;
9054 }
9055 else if (unified_syntax)
9056 {
9057 inst.instruction = THUMB_OP32 (inst.instruction);
9058 inst.instruction |= inst.operands[0].reg << 8;
9059 inst.instruction |= inst.operands[1].reg << 16;
9060 inst.instruction |= inst.operands[1].reg;
9061 }
9062 else
9063 inst.error = BAD_HIREG;
9064 }
9065
9066 static void
9067 do_t_rsb (void)
9068 {
9069 int Rd, Rs;
9070
9071 Rd = inst.operands[0].reg;
9072 Rs = (inst.operands[1].present
9073 ? inst.operands[1].reg /* Rd, Rs, foo */
9074 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9075
9076 inst.instruction |= Rd << 8;
9077 inst.instruction |= Rs << 16;
9078 if (!inst.operands[2].isreg)
9079 {
9080 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9081 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9082 }
9083 else
9084 encode_thumb32_shifted_operand (2);
9085 }
9086
9087 static void
9088 do_t_setend (void)
9089 {
9090 constraint (current_it_mask, BAD_NOT_IT);
9091 if (inst.operands[0].imm)
9092 inst.instruction |= 0x8;
9093 }
9094
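/* Shift instructions (ASR, LSL, LSR, ROR).  In unified syntax the wide
   forms are emitted as MOV/MOVS with a shifted register operand.  A few
   examples:

     lsls r0, r1, #2    @ 16-bit immediate shift (outside an IT block)
     lsl  r0, r1, #2    @ no non-flag-setting 16-bit form outside IT:
                        @ becomes MOV.W r0, r1, LSL #2
     rors r0, r0, r1    @ 16-bit register-controlled shift
     ror  r0, r1, #3    @ no 16-bit ROR by immediate: 32-bit only  */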
9095 static void
9096 do_t_shift (void)
9097 {
9098 if (!inst.operands[1].present)
9099 inst.operands[1].reg = inst.operands[0].reg;
9100
9101 if (unified_syntax)
9102 {
9103 bfd_boolean narrow;
9104 int shift_kind;
9105
9106 switch (inst.instruction)
9107 {
9108 case T_MNEM_asr:
9109 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9110 case T_MNEM_lsl:
9111 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9112 case T_MNEM_lsr:
9113 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9114 case T_MNEM_ror:
9115 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9116 default: abort ();
9117 }
9118
9119 if (THUMB_SETS_FLAGS (inst.instruction))
9120 narrow = (current_it_mask == 0);
9121 else
9122 narrow = (current_it_mask != 0);
9123 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9124 narrow = FALSE;
9125 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9126 narrow = FALSE;
9127 if (inst.operands[2].isreg
9128 && (inst.operands[1].reg != inst.operands[0].reg
9129 || inst.operands[2].reg > 7))
9130 narrow = FALSE;
9131 if (inst.size_req == 4)
9132 narrow = FALSE;
9133
9134 if (!narrow)
9135 {
9136 if (inst.operands[2].isreg)
9137 {
9138 inst.instruction = THUMB_OP32 (inst.instruction);
9139 inst.instruction |= inst.operands[0].reg << 8;
9140 inst.instruction |= inst.operands[1].reg << 16;
9141 inst.instruction |= inst.operands[2].reg;
9142 }
9143 else
9144 {
9145 inst.operands[1].shifted = 1;
9146 inst.operands[1].shift_kind = shift_kind;
9147 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9148 ? T_MNEM_movs : T_MNEM_mov);
9149 inst.instruction |= inst.operands[0].reg << 8;
9150 encode_thumb32_shifted_operand (1);
9151 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9152 inst.reloc.type = BFD_RELOC_UNUSED;
9153 }
9154 }
9155 else
9156 {
9157 if (inst.operands[2].isreg)
9158 {
9159 switch (shift_kind)
9160 {
9161 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9162 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9163 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9164 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9165 default: abort ();
9166 }
9167
9168 inst.instruction |= inst.operands[0].reg;
9169 inst.instruction |= inst.operands[2].reg << 3;
9170 }
9171 else
9172 {
9173 switch (shift_kind)
9174 {
9175 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9176 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9177 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9178 default: abort ();
9179 }
9180 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9181 inst.instruction |= inst.operands[0].reg;
9182 inst.instruction |= inst.operands[1].reg << 3;
9183 }
9184 }
9185 }
9186 else
9187 {
9188 constraint (inst.operands[0].reg > 7
9189 || inst.operands[1].reg > 7, BAD_HIREG);
9190 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9191
9192 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9193 {
9194 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9195 constraint (inst.operands[0].reg != inst.operands[1].reg,
9196 _("source1 and dest must be same register"));
9197
9198 switch (inst.instruction)
9199 {
9200 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9201 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9202 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9203 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9204 default: abort ();
9205 }
9206
9207 inst.instruction |= inst.operands[0].reg;
9208 inst.instruction |= inst.operands[2].reg << 3;
9209 }
9210 else
9211 {
9212 switch (inst.instruction)
9213 {
9214 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9215 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9216 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9217 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9218 default: abort ();
9219 }
9220 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9221 inst.instruction |= inst.operands[0].reg;
9222 inst.instruction |= inst.operands[1].reg << 3;
9223 }
9224 }
9225 }
9226
9227 static void
9228 do_t_simd (void)
9229 {
9230 inst.instruction |= inst.operands[0].reg << 8;
9231 inst.instruction |= inst.operands[1].reg << 16;
9232 inst.instruction |= inst.operands[2].reg;
9233 }
9234
9235 static void
9236 do_t_smc (void)
9237 {
9238 unsigned int value = inst.reloc.exp.X_add_number;
9239 constraint (inst.reloc.exp.X_op != O_constant,
9240 _("expression too complex"));
9241 inst.reloc.type = BFD_RELOC_UNUSED;
9242 inst.instruction |= (value & 0xf000) >> 12;
9243 inst.instruction |= (value & 0x0ff0);
9244 inst.instruction |= (value & 0x000f) << 16;
9245 }
9246
9247 static void
9248 do_t_ssat (void)
9249 {
9250 inst.instruction |= inst.operands[0].reg << 8;
9251 inst.instruction |= inst.operands[1].imm - 1;
9252 inst.instruction |= inst.operands[2].reg << 16;
9253
9254 if (inst.operands[3].present)
9255 {
9256 constraint (inst.reloc.exp.X_op != O_constant,
9257 _("expression too complex"));
9258
9259 if (inst.reloc.exp.X_add_number != 0)
9260 {
9261 if (inst.operands[3].shift_kind == SHIFT_ASR)
9262 inst.instruction |= 0x00200000; /* sh bit */
9263 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9264 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9265 }
9266 inst.reloc.type = BFD_RELOC_UNUSED;
9267 }
9268 }
9269
9270 static void
9271 do_t_ssat16 (void)
9272 {
9273 inst.instruction |= inst.operands[0].reg << 8;
9274 inst.instruction |= inst.operands[1].imm - 1;
9275 inst.instruction |= inst.operands[2].reg << 16;
9276 }
9277
9278 static void
9279 do_t_strex (void)
9280 {
9281 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9282 || inst.operands[2].postind || inst.operands[2].writeback
9283 || inst.operands[2].immisreg || inst.operands[2].shifted
9284 || inst.operands[2].negative,
9285 BAD_ADDR_MODE);
9286
9287 inst.instruction |= inst.operands[0].reg << 8;
9288 inst.instruction |= inst.operands[1].reg << 12;
9289 inst.instruction |= inst.operands[2].reg << 16;
9290 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9291 }
9292
9293 static void
9294 do_t_strexd (void)
9295 {
9296 if (!inst.operands[2].present)
9297 inst.operands[2].reg = inst.operands[1].reg + 1;
9298
9299 constraint (inst.operands[0].reg == inst.operands[1].reg
9300 || inst.operands[0].reg == inst.operands[2].reg
9301 || inst.operands[0].reg == inst.operands[3].reg
9302 || inst.operands[1].reg == inst.operands[2].reg,
9303 BAD_OVERLAP);
9304
9305 inst.instruction |= inst.operands[0].reg;
9306 inst.instruction |= inst.operands[1].reg << 12;
9307 inst.instruction |= inst.operands[2].reg << 8;
9308 inst.instruction |= inst.operands[3].reg << 16;
9309 }
9310
9311 static void
9312 do_t_sxtah (void)
9313 {
9314 inst.instruction |= inst.operands[0].reg << 8;
9315 inst.instruction |= inst.operands[1].reg << 16;
9316 inst.instruction |= inst.operands[2].reg;
9317 inst.instruction |= inst.operands[3].imm << 4;
9318 }
9319
9320 static void
9321 do_t_sxth (void)
9322 {
9323 if (inst.instruction <= 0xffff && inst.size_req != 4
9324 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9325 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9326 {
9327 inst.instruction = THUMB_OP16 (inst.instruction);
9328 inst.instruction |= inst.operands[0].reg;
9329 inst.instruction |= inst.operands[1].reg << 3;
9330 }
9331 else if (unified_syntax)
9332 {
9333 if (inst.instruction <= 0xffff)
9334 inst.instruction = THUMB_OP32 (inst.instruction);
9335 inst.instruction |= inst.operands[0].reg << 8;
9336 inst.instruction |= inst.operands[1].reg;
9337 inst.instruction |= inst.operands[2].imm << 4;
9338 }
9339 else
9340 {
9341 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9342 _("Thumb encoding does not support rotation"));
9343 constraint (1, BAD_HIREG);
9344 }
9345 }
9346
9347 static void
9348 do_t_swi (void)
9349 {
9350 inst.reloc.type = BFD_RELOC_ARM_SWI;
9351 }
9352
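/* TBB/TBH.  Bit 4 of the opcode distinguishes the halfword form, which
   is the only one that may use a shifted index, e.g.:

     tbb [r0, r1]            @ byte table branch
     tbh [r0, r1, lsl #1]    @ halfword table branch  */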
9353 static void
9354 do_t_tb (void)
9355 {
9356 int half;
9357
9358 half = (inst.instruction & 0x10) != 0;
9359 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9360 constraint (inst.operands[0].immisreg,
9361 _("instruction requires register index"));
9362 constraint (inst.operands[0].imm == 15,
9363 _("PC is not a valid index register"));
9364 constraint (!half && inst.operands[0].shifted,
9365 _("instruction does not allow shifted index"));
9366 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9367 }
9368
9369 static void
9370 do_t_usat (void)
9371 {
9372 inst.instruction |= inst.operands[0].reg << 8;
9373 inst.instruction |= inst.operands[1].imm;
9374 inst.instruction |= inst.operands[2].reg << 16;
9375
9376 if (inst.operands[3].present)
9377 {
9378 constraint (inst.reloc.exp.X_op != O_constant,
9379 _("expression too complex"));
9380 if (inst.reloc.exp.X_add_number != 0)
9381 {
9382 if (inst.operands[3].shift_kind == SHIFT_ASR)
9383 inst.instruction |= 0x00200000; /* sh bit */
9384
9385 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9386 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9387 }
9388 inst.reloc.type = BFD_RELOC_UNUSED;
9389 }
9390 }
9391
9392 static void
9393 do_t_usat16 (void)
9394 {
9395 inst.instruction |= inst.operands[0].reg << 8;
9396 inst.instruction |= inst.operands[1].imm;
9397 inst.instruction |= inst.operands[2].reg << 16;
9398 }
9399
9400 /* Neon instruction encoder helpers. */
9401
9402 /* Encodings for the different types for various Neon opcodes. */
9403
9404 /* An "invalid" code for the following tables. */
9405 #define N_INV -1u
9406
9407 struct neon_tab_entry
9408 {
9409 unsigned integer;
9410 unsigned float_or_poly;
9411 unsigned scalar_or_imm;
9412 };
9413
9414 /* Map overloaded Neon opcodes to their respective encodings. */
9415 #define NEON_ENC_TAB \
9416 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9417 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9418 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9419 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9420 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9421 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9422 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9423 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9424 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9425 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9426 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9427 /* Register variants of the following two instructions are encoded as
9428 vcge / vcgt with the operands reversed. */ \
9429 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9430 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9431 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9432 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9433 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9434 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9435 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9436 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9437 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9438 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9439 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9440 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9441 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9442 X(vshl, 0x0000400, N_INV, 0x0800510), \
9443 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9444 X(vand, 0x0000110, N_INV, 0x0800030), \
9445 X(vbic, 0x0100110, N_INV, 0x0800030), \
9446 X(veor, 0x1000110, N_INV, N_INV), \
9447 X(vorn, 0x0300110, N_INV, 0x0800010), \
9448 X(vorr, 0x0200110, N_INV, 0x0800010), \
9449 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9450 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9451 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9452 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9453 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9454 X(vst1, 0x0000000, 0x0800000, N_INV), \
9455 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9456 X(vst2, 0x0000100, 0x0800100, N_INV), \
9457 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9458 X(vst3, 0x0000200, 0x0800200, N_INV), \
9459 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9460 X(vst4, 0x0000300, 0x0800300, N_INV), \
9461 X(vmovn, 0x1b20200, N_INV, N_INV), \
9462 X(vtrn, 0x1b20080, N_INV, N_INV), \
9463 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9464 X(vqmovun, 0x1b20240, N_INV, N_INV)
9465
9466 enum neon_opc
9467 {
9468 #define X(OPC,I,F,S) N_MNEM_##OPC
9469 NEON_ENC_TAB
9470 #undef X
9471 };
9472
9473 static const struct neon_tab_entry neon_enc_tab[] =
9474 {
9475 #define X(OPC,I,F,S) { (I), (F), (S) }
9476 NEON_ENC_TAB
9477 #undef X
9478 };
9479
9480 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9481 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9482 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9483 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9484 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9485 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9486 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9487 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9488 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9489
9490 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9491 shapes which an instruction can accept. The following mnemonic characters
9492 are used in the tag names for this enumeration:
9493
9494 D - Neon D<n> register
9495 Q - Neon Q<n> register
9496 I - Immediate
9497 S - Scalar
9498 R - ARM register
9499 L - D<n> register list
9500 */
9501
9502 enum neon_shape
9503 {
9504 NS_DDD_QQQ,
9505 NS_DDD,
9506 NS_QQQ,
9507 NS_DDI_QQI,
9508 NS_DDI,
9509 NS_QQI,
9510 NS_DDS_QQS,
9511 NS_DDS,
9512 NS_QQS,
9513 NS_DD_QQ,
9514 NS_DD,
9515 NS_QQ,
9516 NS_DS_QS,
9517 NS_DS,
9518 NS_QS,
9519 NS_DR_QR,
9520 NS_DR,
9521 NS_QR,
9522 NS_DI_QI,
9523 NS_DI,
9524 NS_QI,
9525 NS_DLD,
9526 NS_DQ,
9527 NS_QD,
9528 NS_DQI,
9529 NS_QDI,
9530 NS_QDD,
9531 NS_QDS,
9532 NS_QQD,
9533 NS_DQQ,
9534 NS_DDDI_QQQI,
9535 NS_DDDI,
9536 NS_QQQI,
9537 NS_IGNORE
9538 };
9539
9540 /* Bit masks used in type checking given instructions.
9541 'N_EQK' means the type must be the same as (or based on in some way) the key
9542 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9543 set, various other bits can be set as well in order to modify the meaning of
9544 the type constraint. */
9545
9546 enum neon_type_mask
9547 {
9548 N_S8 = 0x000001,
9549 N_S16 = 0x000002,
9550 N_S32 = 0x000004,
9551 N_S64 = 0x000008,
9552 N_U8 = 0x000010,
9553 N_U16 = 0x000020,
9554 N_U32 = 0x000040,
9555 N_U64 = 0x000080,
9556 N_I8 = 0x000100,
9557 N_I16 = 0x000200,
9558 N_I32 = 0x000400,
9559 N_I64 = 0x000800,
9560 N_8 = 0x001000,
9561 N_16 = 0x002000,
9562 N_32 = 0x004000,
9563 N_64 = 0x008000,
9564 N_P8 = 0x010000,
9565 N_P16 = 0x020000,
9566 N_F32 = 0x040000,
9567 N_KEY = 0x080000, /* key element (main type specifier). */
9568 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9569 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9570 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9571 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9572 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9573 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9574 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9575 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9576 N_UTYP = 0,
9577 N_MAX_NONSPECIAL = N_F32
9578 };
9579
9580 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9581
9582 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9583 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9584 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9585 #define N_SUF_32 (N_SU_32 | N_F32)
9586 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9587 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9588
9589 /* Pass this as the first type argument to neon_check_type to ignore types
9590 altogether. */
9591 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9592
9593 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9594 specific shape when there are two alternatives. For non-polymorphic shapes,
9595 checking is done during operand parsing, so is not implemented here. */
9596
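/* For example, the NS_DDD_QQQ shape used by the dyadic encoders below
   resolves to NS_DDD for <Dd>, <Dn>, <Dm> operands and to NS_QQQ for
   <Qd>, <Qn>, <Qm>; mixing D and Q registers is rejected.  */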
9597 static enum neon_shape
9598 neon_check_shape (enum neon_shape req)
9599 {
9600 #define RR(X) (inst.operands[(X)].isreg)
9601 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9602 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9603 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9604 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9605
9606 /* Fix missing optional operands. FIXME: we don't know at this point how
9607 many arguments we should have, so this makes the assumption that we have
9608 > 1. This is true of all current Neon opcodes, I think, but may not be
9609 true in the future. */
9610 if (!inst.operands[1].present)
9611 inst.operands[1] = inst.operands[0];
9612
9613 switch (req)
9614 {
9615 case NS_DDD_QQQ:
9616 {
9617 if (RD(0) && RD(1) && RD(2))
9618 return NS_DDD;
9619 else if (RQ(0) && RQ(1) && RQ(2))
9620 return NS_QQQ;
9621 else
9622 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9623 "operands"));
9624 }
9625 break;
9626
9627 case NS_DDI_QQI:
9628 {
9629 if (RD(0) && RD(1) && IM(2))
9630 return NS_DDI;
9631 else if (RQ(0) && RQ(1) && IM(2))
9632 return NS_QQI;
9633 else
9634 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9635 "operands"));
9636 }
9637 break;
9638
9639 case NS_DDDI_QQQI:
9640 {
9641 if (RD(0) && RD(1) && RD(2) && IM(3))
9642 return NS_DDDI;
9643 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9644 return NS_QQQI;
9645 else
9646 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9647 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9648 }
9649 break;
9650
9651 case NS_DDS_QQS:
9652 {
9653 if (RD(0) && RD(1) && SC(2))
9654 return NS_DDS;
9655 else if (RQ(0) && RQ(1) && SC(2))
9656 return NS_QQS;
9657 else
9658 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9659 "operands"));
9660 }
9661 break;
9662
9663 case NS_DD_QQ:
9664 {
9665 if (RD(0) && RD(1))
9666 return NS_DD;
9667 else if (RQ(0) && RQ(1))
9668 return NS_QQ;
9669 else
9670 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9671 }
9672 break;
9673
9674 case NS_DS_QS:
9675 {
9676 if (RD(0) && SC(1))
9677 return NS_DS;
9678 else if (RQ(0) && SC(1))
9679 return NS_QS;
9680 else
9681 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9682 }
9683 break;
9684
9685 case NS_DR_QR:
9686 {
9687 if (RD(0) && RR(1))
9688 return NS_DR;
9689 else if (RQ(0) && RR(1))
9690 return NS_QR;
9691 else
9692 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9693 }
9694 break;
9695
9696 case NS_DI_QI:
9697 {
9698 if (RD(0) && IM(1))
9699 return NS_DI;
9700 else if (RQ(0) && IM(1))
9701 return NS_QI;
9702 else
9703 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9704 }
9705 break;
9706
9707 default:
9708 abort ();
9709 }
9710
9711 return req;
9712 #undef RR
9713 #undef RD
9714 #undef RQ
9715 #undef IM
9716 #undef SC
9717 }
9718
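/* For example, a key type of S16 combined with N_EQK | N_DBL | N_UNS
   yields U32 (twice the size, forced unsigned), and combined with
   N_EQK | N_HLF it yields S8.  */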
9719 static void
9720 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9721 unsigned *g_size)
9722 {
9723 /* Allow modification to be made to types which are constrained to be
9724 based on the key element, based on bits set alongside N_EQK. */
9725 if ((typebits & N_EQK) != 0)
9726 {
9727 if ((typebits & N_HLF) != 0)
9728 *g_size /= 2;
9729 else if ((typebits & N_DBL) != 0)
9730 *g_size *= 2;
9731 if ((typebits & N_SGN) != 0)
9732 *g_type = NT_signed;
9733 else if ((typebits & N_UNS) != 0)
9734 *g_type = NT_unsigned;
9735 else if ((typebits & N_INT) != 0)
9736 *g_type = NT_integer;
9737 else if ((typebits & N_FLT) != 0)
9738 *g_type = NT_float;
9739 else if ((typebits & N_SIZ) != 0)
9740 *g_type = NT_untyped;
9741 }
9742 }
9743
9744 /* Return a copy of KEY promoted by the bits set in THISARG. KEY should be the "key"
9745 operand type, i.e. the single type specified in a Neon instruction when it
9746 is the only one given. */
9747
9748 static struct neon_type_el
9749 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9750 {
9751 struct neon_type_el dest = *key;
9752
9753 assert ((thisarg & N_EQK) != 0);
9754
9755 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9756
9757 return dest;
9758 }
9759
9760 /* Convert Neon type and size into compact bitmask representation. */
9761
9762 static enum neon_type_mask
9763 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9764 {
9765 switch (type)
9766 {
9767 case NT_untyped:
9768 switch (size)
9769 {
9770 case 8: return N_8;
9771 case 16: return N_16;
9772 case 32: return N_32;
9773 case 64: return N_64;
9774 default: ;
9775 }
9776 break;
9777
9778 case NT_integer:
9779 switch (size)
9780 {
9781 case 8: return N_I8;
9782 case 16: return N_I16;
9783 case 32: return N_I32;
9784 case 64: return N_I64;
9785 default: ;
9786 }
9787 break;
9788
9789 case NT_float:
9790 if (size == 32)
9791 return N_F32;
9792 break;
9793
9794 case NT_poly:
9795 switch (size)
9796 {
9797 case 8: return N_P8;
9798 case 16: return N_P16;
9799 default: ;
9800 }
9801 break;
9802
9803 case NT_signed:
9804 switch (size)
9805 {
9806 case 8: return N_S8;
9807 case 16: return N_S16;
9808 case 32: return N_S32;
9809 case 64: return N_S64;
9810 default: ;
9811 }
9812 break;
9813
9814 case NT_unsigned:
9815 switch (size)
9816 {
9817 case 8: return N_U8;
9818 case 16: return N_U16;
9819 case 32: return N_U32;
9820 case 64: return N_U64;
9821 default: ;
9822 }
9823 break;
9824
9825 default: ;
9826 }
9827
9828 return N_UTYP;
9829 }
9830
9831 /* Convert compact Neon bitmask type representation to a type and size. Only
9832 handles the case where a single bit is set in the mask. */
9833
9834 static int
9835 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9836 enum neon_type_mask mask)
9837 {
9838 if ((mask & N_EQK) != 0)
9839 return FAIL;
9840
9841 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9842 *size = 8;
9843 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9844 *size = 16;
9845 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9846 *size = 32;
9847 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9848 *size = 64;
9849 else
9850 return FAIL;
9851
9852 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9853 *type = NT_signed;
9854 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9855 *type = NT_unsigned;
9856 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9857 *type = NT_integer;
9858 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9859 *type = NT_untyped;
9860 else if ((mask & (N_P8 | N_P16)) != 0)
9861 *type = NT_poly;
9862 else if ((mask & N_F32) != 0)
9863 *type = NT_float;
9864 else
9865 return FAIL;
9866
9867 return SUCCESS;
9868 }
9869
9870 /* Modify a bitmask of allowed types. This is only needed for type
9871 relaxation. */
9872
9873 static unsigned
9874 modify_types_allowed (unsigned allowed, unsigned mods)
9875 {
9876 unsigned size;
9877 enum neon_el_type type;
9878 unsigned destmask;
9879 int i;
9880
9881 destmask = 0;
9882
9883 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9884 {
9885 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9886 {
9887 neon_modify_type_size (mods, &type, &size);
9888 destmask |= type_chk_of_el_type (type, size);
9889 }
9890 }
9891
9892 return destmask;
9893 }
9894
9895 /* Check type and return type classification.
9896 The manual states (paraphrase): If one datatype is given, it indicates the
9897 type given in:
9898 - the second operand, if there is one
9899 - the operand, if there is no second operand
9900 - the result, if there are no operands.
9901 This isn't quite good enough though, so we use a concept of a "key" datatype
9902 which is set on a per-instruction basis, which is the one which matters when
9903 only one data type is written.
9904 Note: this function has side-effects (e.g. filling in missing operands). All
9905 Neon instructions should call it before performing bit encoding.
9906 */
9907
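/* For example, neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY)
   (as used by do_neon_dyadic_i_su below) takes the single written type,
   say ".s16", as the type of the third (key) operand, requires it to be
   one of S8/S16/S32/U8/U16/U32, and forces the first two operands to
   match it.  */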
9908 static struct neon_type_el
9909 neon_check_type (unsigned els, enum neon_shape ns, ...)
9910 {
9911 va_list ap;
9912 unsigned i, pass, key_el = 0;
9913 unsigned types[NEON_MAX_TYPE_ELS];
9914 enum neon_el_type k_type = NT_invtype;
9915 unsigned k_size = -1u;
9916 struct neon_type_el badtype = {NT_invtype, -1};
9917 unsigned key_allowed = 0;
9918
9919 /* The optional register in a Neon instruction is always operand 1; the
9920 destination doubles as the first source. Fill in the missing operand here, if it was omitted. */
9921 if (els > 1 && !inst.operands[1].present)
9922 inst.operands[1] = inst.operands[0];
9923
9924 /* Suck up all the varargs. */
9925 va_start (ap, ns);
9926 for (i = 0; i < els; i++)
9927 {
9928 unsigned thisarg = va_arg (ap, unsigned);
9929 if (thisarg == N_IGNORE_TYPE)
9930 {
9931 va_end (ap);
9932 return badtype;
9933 }
9934 types[i] = thisarg;
9935 if ((thisarg & N_KEY) != 0)
9936 key_el = i;
9937 }
9938 va_end (ap);
9939
9940 if (inst.vectype.elems > 0)
9941 for (i = 0; i < els; i++)
9942 if (inst.operands[i].vectype.type != NT_invtype)
9943 {
9944 first_error (_("types specified in both the mnemonic and operands"));
9945 return badtype;
9946 }
9947
9948 /* Duplicate inst.vectype elements here as necessary.
9949 FIXME: No idea if this is exactly the same as the ARM assembler,
9950 particularly when an insn takes one register and one non-register
9951 operand. */
9952 if (inst.vectype.elems == 1 && els > 1)
9953 {
9954 unsigned j;
9955 inst.vectype.elems = els;
9956 inst.vectype.el[key_el] = inst.vectype.el[0];
9957 for (j = 0; j < els; j++)
9958 if (j != key_el)
9959 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9960 types[j]);
9961 }
9962 else if (inst.vectype.elems == 0 && els > 0)
9963 {
9964 unsigned j;
9965 /* No types were given after the mnemonic, so look for types specified
9966 after each operand. We allow some flexibility here; as long as the
9967 "key" operand has a type, we can infer the others. */
9968 for (j = 0; j < els; j++)
9969 if (inst.operands[j].vectype.type != NT_invtype)
9970 inst.vectype.el[j] = inst.operands[j].vectype;
9971
9972 if (inst.operands[key_el].vectype.type != NT_invtype)
9973 {
9974 for (j = 0; j < els; j++)
9975 if (inst.operands[j].vectype.type == NT_invtype)
9976 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9977 types[j]);
9978 }
9979 else
9980 {
9981 first_error (_("operand types can't be inferred"));
9982 return badtype;
9983 }
9984 }
9985 else if (inst.vectype.elems != els)
9986 {
9987 first_error (_("type specifier has the wrong number of parts"));
9988 return badtype;
9989 }
9990
9991 for (pass = 0; pass < 2; pass++)
9992 {
9993 for (i = 0; i < els; i++)
9994 {
9995 unsigned thisarg = types[i];
9996 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
9997 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
9998 enum neon_el_type g_type = inst.vectype.el[i].type;
9999 unsigned g_size = inst.vectype.el[i].size;
10000
10001 /* Decay more-specific signed & unsigned types to sign-insensitive
10002 integer types if sign-specific variants are unavailable. */
10003 if ((g_type == NT_signed || g_type == NT_unsigned)
10004 && (types_allowed & N_SU_ALL) == 0)
10005 g_type = NT_integer;
10006
10007 /* If only untyped args are allowed, decay any more specific types to
10008 them. Some instructions only care about signs for some element
10009 sizes, so handle that properly. */
10010 if ((g_size == 8 && (types_allowed & N_8) != 0)
10011 || (g_size == 16 && (types_allowed & N_16) != 0)
10012 || (g_size == 32 && (types_allowed & N_32) != 0)
10013 || (g_size == 64 && (types_allowed & N_64) != 0))
10014 g_type = NT_untyped;
10015
10016 if (pass == 0)
10017 {
10018 if ((thisarg & N_KEY) != 0)
10019 {
10020 k_type = g_type;
10021 k_size = g_size;
10022 key_allowed = thisarg & ~N_KEY;
10023 }
10024 }
10025 else
10026 {
10027 if ((thisarg & N_EQK) == 0)
10028 {
10029 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10030
10031 if ((given_type & types_allowed) == 0)
10032 {
10033 first_error (_("bad type in Neon instruction"));
10034 return badtype;
10035 }
10036 }
10037 else
10038 {
10039 enum neon_el_type mod_k_type = k_type;
10040 unsigned mod_k_size = k_size;
10041 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10042 if (g_type != mod_k_type || g_size != mod_k_size)
10043 {
10044 first_error (_("inconsistent types in Neon instruction"));
10045 return badtype;
10046 }
10047 }
10048 }
10049 }
10050 }
10051
10052 return inst.vectype.el[key_el];
10053 }
10054
10055 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10056 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10057
10058 static unsigned
10059 neon_dp_fixup (unsigned i)
10060 {
10061 if (thumb_mode)
10062 {
10063 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10064 if (i & (1 << 24))
10065 i |= 1 << 28;
10066
10067 i &= ~(1 << 24);
10068
10069 i |= 0xef000000;
10070 }
10071 else
10072 i |= 0xf2000000;
10073
10074 return i;
10075 }
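/* For instance (illustrative): in Thumb mode an encoding with the U bit set,
   say 0x01000000, becomes ((0x01000000 & ~(1 << 24)) | (1 << 28)) | 0xef000000
   == 0xff000000, while in ARM mode the same value simply becomes
   0x01000000 | 0xf2000000 == 0xf3000000.  */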
10076
10077 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10078 (0, 1, 2, 3). */
10079
10080 static unsigned
10081 neon_logbits (unsigned x)
10082 {
10083 return ffs (x) - 4;
10084 }
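/* E.g. neon_logbits (32) == ffs (32) - 4 == 6 - 4 == 2, the value written into
   the two-bit size fields by the encoders below.  */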
10085
10086 #define LOW4(R) ((R) & 0xf)
10087 #define HI1(R) (((R) >> 4) & 1)
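/* These split a five-bit Neon register number across the instruction's split
   register fields, e.g. for d17, LOW4 (17) == 1 and HI1 (17) == 1.  */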
10088
10089 /* Encode insns with bit pattern:
10090
10091 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10092 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10093
10094 SIZE is passed in bits. -1 means the size field isn't changed, in case it
10095 has a different meaning for some instructions. */
10096
10097 static void
10098 neon_three_same (int isquad, int ubit, int size)
10099 {
10100 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10101 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10102 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10103 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10104 inst.instruction |= LOW4 (inst.operands[2].reg);
10105 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10106 inst.instruction |= (isquad != 0) << 6;
10107 inst.instruction |= (ubit != 0) << 24;
10108 if (size != -1)
10109 inst.instruction |= neon_logbits (size) << 20;
10110
10111 inst.instruction = neon_dp_fixup (inst.instruction);
10112 }
10113
10114 /* Encode instructions of the form:
10115
10116 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10117 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10118
10119 Don't write size if SIZE == -1. */
10120
10121 static void
10122 neon_two_same (int qbit, int ubit, int size)
10123 {
10124 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10125 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10126 inst.instruction |= LOW4 (inst.operands[1].reg);
10127 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10128 inst.instruction |= (qbit != 0) << 6;
10129 inst.instruction |= (ubit != 0) << 24;
10130
10131 if (size != -1)
10132 inst.instruction |= neon_logbits (size) << 18;
10133
10134 inst.instruction = neon_dp_fixup (inst.instruction);
10135 }
10136
10137 /* Neon instruction encoders, in approximate order of appearance. */
10138
10139 static void
10140 do_neon_dyadic_i_su (void)
10141 {
10142 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10143 struct neon_type_el et = neon_check_type (3, rs,
10144 N_EQK, N_EQK, N_SU_32 | N_KEY);
10145 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10146 }
10147
10148 static void
10149 do_neon_dyadic_i64_su (void)
10150 {
10151 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10152 struct neon_type_el et = neon_check_type (3, rs,
10153 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10154 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10155 }
10156
10157 static void
10158 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10159 unsigned immbits)
10160 {
10161 unsigned size = et.size >> 3;
10162 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10163 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10164 inst.instruction |= LOW4 (inst.operands[1].reg);
10165 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10166 inst.instruction |= (isquad != 0) << 6;
10167 inst.instruction |= immbits << 16;
10168 inst.instruction |= (size >> 3) << 7;
10169 inst.instruction |= (size & 0x7) << 19;
10170 if (write_ubit)
10171 inst.instruction |= (uval != 0) << 24;
10172
10173 inst.instruction = neon_dp_fixup (inst.instruction);
10174 }
10175
10176 static void
10177 do_neon_shl_imm (void)
10178 {
10179 if (!inst.operands[2].isreg)
10180 {
10181 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10182 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10183 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10184 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10185 }
10186 else
10187 {
10188 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10189 struct neon_type_el et = neon_check_type (3, rs,
10190 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10191 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10192 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10193 }
10194 }
10195
10196 static void
10197 do_neon_qshl_imm (void)
10198 {
10199 if (!inst.operands[2].isreg)
10200 {
10201 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10202 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10203 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10204 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10205 inst.operands[2].imm);
10206 }
10207 else
10208 {
10209 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10210 struct neon_type_el et = neon_check_type (3, rs,
10211 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10212 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10213 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10214 }
10215 }
10216
10217 static int
10218 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10219 {
10220 /* Handle .I8 and .I64 as pseudo-instructions. */
10221 switch (size)
10222 {
10223 case 8:
10224 /* Unfortunately, this will make everything apart from zero out-of-range.
10225 FIXME: is this the intended semantics? There doesn't seem to be much point
10226 in accepting .I8 if so. */
10227 immediate |= immediate << 8;
10228 size = 16;
10229 break;
10230 case 64:
10231 /* Similarly, anything other than zero will be replicated in bits [63:32],
10232 which probably isn't what we want if we specified .I64. */
10233 if (immediate != 0)
10234 goto bad_immediate;
10235 size = 32;
10236 break;
10237 default: ;
10238 }
10239
10240 if (immediate == (immediate & 0x000000ff))
10241 {
10242 *immbits = immediate;
10243 return (size == 16) ? 0x9 : 0x1;
10244 }
10245 else if (immediate == (immediate & 0x0000ff00))
10246 {
10247 *immbits = immediate >> 8;
10248 return (size == 16) ? 0xb : 0x3;
10249 }
10250 else if (immediate == (immediate & 0x00ff0000))
10251 {
10252 *immbits = immediate >> 16;
10253 return 0x5;
10254 }
10255 else if (immediate == (immediate & 0xff000000))
10256 {
10257 *immbits = immediate >> 24;
10258 return 0x7;
10259 }
10260
10261 bad_immediate:
10262 first_error (_("immediate value out of range"));
10263 return FAIL;
10264 }
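/* Worked example (illustrative): an immediate of 0xab00 with SIZE == 32 takes
   the second branch, setting *immbits to 0xab and returning cmode 0x3; with
   SIZE == 16 the same pattern returns cmode 0xb instead.  */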
10265
10266 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10267 A, B, C, D. */
10268
10269 static int
10270 neon_bits_same_in_bytes (unsigned imm)
10271 {
10272 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10273 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10274 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10275 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10276 }
10277
10278 /* For immediate of above form, return 0bABCD. */
10279
10280 static unsigned
10281 neon_squash_bits (unsigned imm)
10282 {
10283 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10284 | ((imm & 0x01000000) >> 21);
10285 }
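/* E.g. 0xff00ff00 passes neon_bits_same_in_bytes and squashes to
   0b1010 == 0xa (A == 1, B == 0, C == 1, D == 0).  */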
10286
10287 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10288
10289 static unsigned
10290 neon_qfloat_bits (unsigned imm)
10291 {
10292 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10293 }
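/* E.g. the single-precision bit pattern of 1.0f (0x3f800000) yields
   ((0x3f800000 >> 19) & 0x7f) | 0 == 0x70.  */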
10294
10295 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10296 the instruction. *OP is passed as the initial value of the op field, and
10297 may be set to a different value depending on the constant (e.g.
10298 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10299 MVN). */
10300
10301 static int
10302 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10303 int *op, int size, enum neon_el_type type)
10304 {
10305 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10306 {
10307 if (size != 32 || *op == 1)
10308 return FAIL;
10309 *immbits = neon_qfloat_bits (immlo);
10310 return 0xf;
10311 }
10312 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10313 && neon_bits_same_in_bytes (immlo))
10314 {
10315 /* Check this one first so we don't have to bother with immhi in later
10316 tests. */
10317 if (*op == 1)
10318 return FAIL;
10319 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10320 *op = 1;
10321 return 0xe;
10322 }
10323 else if (immhi != 0)
10324 return FAIL;
10325 else if (immlo == (immlo & 0x000000ff))
10326 {
10327 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10328 immediate. */
10329 if ((size != 8 && size != 16 && size != 32)
10330 || (size == 8 && *op == 1))
10331 return FAIL;
10332 *immbits = immlo;
10333 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10334 }
10335 else if (immlo == (immlo & 0x0000ff00))
10336 {
10337 if (size != 16 && size != 32)
10338 return FAIL;
10339 *immbits = immlo >> 8;
10340 return (size == 16) ? 0xa : 0x2;
10341 }
10342 else if (immlo == (immlo & 0x00ff0000))
10343 {
10344 if (size != 32)
10345 return FAIL;
10346 *immbits = immlo >> 16;
10347 return 0x4;
10348 }
10349 else if (immlo == (immlo & 0xff000000))
10350 {
10351 if (size != 32)
10352 return FAIL;
10353 *immbits = immlo >> 24;
10354 return 0x6;
10355 }
10356 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10357 {
10358 if (size != 32)
10359 return FAIL;
10360 *immbits = (immlo >> 8) & 0xff;
10361 return 0xc;
10362 }
10363 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10364 {
10365 if (size != 32)
10366 return FAIL;
10367 *immbits = (immlo >> 16) & 0xff;
10368 return 0xd;
10369 }
10370
10371 return FAIL;
10372 }
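/* Worked examples (illustrative): for a 32-bit move, #0x5600 gives
   *immbits == 0x56 and cmode 0x2; #0x56ff matches the 0x0000XXFF form and
   gives *immbits == 0x56 and cmode 0xc; a quarter-float constant returns
   cmode 0xf, and is only accepted when SIZE == 32 and *OP == 0.  */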
10373
10374 /* Write immediate bits [7:0] to the following locations:
10375
10376 |28/24|23 19|18 16|15 4|3 0|
10377 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10378
10379 This function is used by VMOV/VMVN/VORR/VBIC. */
10380
10381 static void
10382 neon_write_immbits (unsigned immbits)
10383 {
10384 inst.instruction |= immbits & 0xf;
10385 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10386 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10387 }
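/* E.g. IMMBITS == 0xab (0b10101011) places efgh == 0xb in bits [3:0],
   bcd == 0x2 in bits [18:16] and a == 1 in bit 24.  */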
10388
10389 /* Invert low-order SIZE bits of XHI:XLO. */
10390
10391 static void
10392 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10393 {
10394 unsigned immlo = xlo ? *xlo : 0;
10395 unsigned immhi = xhi ? *xhi : 0;
10396
10397 switch (size)
10398 {
10399 case 8:
10400 immlo = (~immlo) & 0xff;
10401 break;
10402
10403 case 16:
10404 immlo = (~immlo) & 0xffff;
10405 break;
10406
10407 case 64:
10408 immhi = (~immhi) & 0xffffffff;
10409 /* fall through. */
10410
10411 case 32:
10412 immlo = (~immlo) & 0xffffffff;
10413 break;
10414
10415 default:
10416 abort ();
10417 }
10418
10419 if (xlo)
10420 *xlo = immlo;
10421
10422 if (xhi)
10423 *xhi = immhi;
10424 }
10425
10426 static void
10427 do_neon_logic (void)
10428 {
10429 if (inst.operands[2].present && inst.operands[2].isreg)
10430 {
10431 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10432 neon_check_type (3, rs, N_IGNORE_TYPE);
10433 /* U bit and size field were set as part of the bitmask. */
10434 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10435 neon_three_same (rs == NS_QQQ, 0, -1);
10436 }
10437 else
10438 {
10439 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10440 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10441 | N_I64 | N_F32);
10442 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10443 unsigned immbits;
10444 int cmode;
10445
10446 if (et.type == NT_invtype)
10447 return;
10448
10449 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10450
10451 switch (opcode)
10452 {
10453 case N_MNEM_vbic:
10454 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10455 et.size);
10456 break;
10457
10458 case N_MNEM_vorr:
10459 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10460 et.size);
10461 break;
10462
10463 case N_MNEM_vand:
10464 /* Pseudo-instruction for VBIC. */
10465 immbits = inst.operands[1].imm;
10466 neon_invert_size (&immbits, 0, et.size);
10467 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10468 break;
10469
10470 case N_MNEM_vorn:
10471 /* Pseudo-instruction for VORR. */
10472 immbits = inst.operands[1].imm;
10473 neon_invert_size (&immbits, 0, et.size);
10474 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10475 break;
10476
10477 default:
10478 abort ();
10479 }
10480
10481 if (cmode == FAIL)
10482 return;
10483
10484 inst.instruction |= (rs == NS_QI) << 6;
10485 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10486 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10487 inst.instruction |= cmode << 8;
10488 neon_write_immbits (immbits);
10489
10490 inst.instruction = neon_dp_fixup (inst.instruction);
10491 }
10492 }
10493
10494 static void
10495 do_neon_bitfield (void)
10496 {
10497 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10498 neon_check_type (3, rs, N_IGNORE_TYPE);
10499 neon_three_same (rs == NS_QQQ, 0, -1);
10500 }
10501
10502 static void
10503 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10504 unsigned destbits)
10505 {
10506 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10507 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10508 types | N_KEY);
10509 if (et.type == NT_float)
10510 {
10511 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10512 neon_three_same (rs == NS_QQQ, 0, -1);
10513 }
10514 else
10515 {
10516 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10517 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10518 }
10519 }
10520
10521 static void
10522 do_neon_dyadic_if_su (void)
10523 {
10524 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10525 }
10526
10527 static void
10528 do_neon_dyadic_if_su_d (void)
10529 {
10530 /* This version only allows D registers, but that constraint is enforced
10531 during operand parsing, so we don't need to do anything extra here. */
10532 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10533 }
10534
10535 static void
10536 do_neon_dyadic_if_i (void)
10537 {
10538 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10539 }
10540
10541 static void
10542 do_neon_dyadic_if_i_d (void)
10543 {
10544 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10545 }
10546
10547 static void
10548 do_neon_addsub_if_i (void)
10549 {
10550 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10551 affected if we specify unsigned args. */
10552 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10553 }
10554
10555 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10556 result to be:
10557 V<op> A,B (A is operand 0, B is operand 2)
10558 to mean:
10559 V<op> A,B,A
10560 not:
10561 V<op> A,B,B
10562 so handle that case specially. */
10563
10564 static void
10565 neon_exchange_operands (void)
10566 {
10567 void *scratch = alloca (sizeof (inst.operands[0]));
10568 if (inst.operands[1].present)
10569 {
10570 /* Swap operands[1] and operands[2]. */
10571 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10572 inst.operands[1] = inst.operands[2];
10573 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10574 }
10575 else
10576 {
10577 inst.operands[1] = inst.operands[2];
10578 inst.operands[2] = inst.operands[0];
10579 }
10580 }
10581
10582 static void
10583 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10584 {
10585 if (inst.operands[2].isreg)
10586 {
10587 if (invert)
10588 neon_exchange_operands ();
10589 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10590 }
10591 else
10592 {
10593 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10594 struct neon_type_el et = neon_check_type (2, rs,
10595 N_EQK | N_SIZ, immtypes | N_KEY);
10596
10597 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10598 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10599 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10600 inst.instruction |= LOW4 (inst.operands[1].reg);
10601 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10602 inst.instruction |= (rs == NS_QQI) << 6;
10603 inst.instruction |= (et.type == NT_float) << 10;
10604 inst.instruction |= neon_logbits (et.size) << 18;
10605
10606 inst.instruction = neon_dp_fixup (inst.instruction);
10607 }
10608 }
10609
10610 static void
10611 do_neon_cmp (void)
10612 {
10613 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10614 }
10615
10616 static void
10617 do_neon_cmp_inv (void)
10618 {
10619 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10620 }
10621
10622 static void
10623 do_neon_ceq (void)
10624 {
10625 neon_compare (N_IF_32, N_IF_32, FALSE);
10626 }
10627
10628 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10629 scalars, which are encoded in 5 bits, M : Rm.
10630 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10631 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10632 index in M. */
10633
10634 static unsigned
10635 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10636 {
10637 unsigned regno = NEON_SCALAR_REG (scalar);
10638 unsigned elno = NEON_SCALAR_INDEX (scalar);
10639
10640 switch (elsize)
10641 {
10642 case 16:
10643 if (regno > 7 || elno > 3)
10644 goto bad_scalar;
10645 return regno | (elno << 3);
10646
10647 case 32:
10648 if (regno > 15 || elno > 1)
10649 goto bad_scalar;
10650 return regno | (elno << 4);
10651
10652 default:
10653 bad_scalar:
10654 first_error (_("scalar out of range for multiply instruction"));
10655 }
10656
10657 return 0;
10658 }
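/* E.g. (illustrative) a 16-bit scalar d5[2] encodes as 5 | (2 << 3) == 0x15,
   and a 32-bit scalar d12[1] as 12 | (1 << 4) == 0x1c.  */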
10659
10660 /* Encode multiply / multiply-accumulate scalar instructions. */
10661
10662 static void
10663 neon_mul_mac (struct neon_type_el et, int ubit)
10664 {
10665 unsigned scalar;
10666
10667 /* Give a more helpful error message if we have an invalid type. */
10668 if (et.type == NT_invtype)
10669 return;
10670
10671 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10672 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10673 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10674 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10675 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10676 inst.instruction |= LOW4 (scalar);
10677 inst.instruction |= HI1 (scalar) << 5;
10678 inst.instruction |= (et.type == NT_float) << 8;
10679 inst.instruction |= neon_logbits (et.size) << 20;
10680 inst.instruction |= (ubit != 0) << 24;
10681
10682 inst.instruction = neon_dp_fixup (inst.instruction);
10683 }
10684
10685 static void
10686 do_neon_mac_maybe_scalar (void)
10687 {
10688 if (inst.operands[2].isscalar)
10689 {
10690 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10691 struct neon_type_el et = neon_check_type (3, rs,
10692 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10693 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10694 neon_mul_mac (et, rs == NS_QQS);
10695 }
10696 else
10697 do_neon_dyadic_if_i ();
10698 }
10699
10700 static void
10701 do_neon_tst (void)
10702 {
10703 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10704 struct neon_type_el et = neon_check_type (3, rs,
10705 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10706 neon_three_same (rs == NS_QQQ, 0, et.size);
10707 }
10708
10709 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10710 same types as the MAC equivalents. The polynomial type for this instruction
10711 is encoded the same as the integer type. */
10712
10713 static void
10714 do_neon_mul (void)
10715 {
10716 if (inst.operands[2].isscalar)
10717 do_neon_mac_maybe_scalar ();
10718 else
10719 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10720 }
10721
10722 static void
10723 do_neon_qdmulh (void)
10724 {
10725 if (inst.operands[2].isscalar)
10726 {
10727 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10728 struct neon_type_el et = neon_check_type (3, rs,
10729 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10730 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10731 neon_mul_mac (et, rs == NS_QQS);
10732 }
10733 else
10734 {
10735 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10736 struct neon_type_el et = neon_check_type (3, rs,
10737 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10738 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10739 /* The U bit (rounding) comes from the bit mask. */
10740 neon_three_same (rs == NS_QQQ, 0, et.size);
10741 }
10742 }
10743
10744 static void
10745 do_neon_fcmp_absolute (void)
10746 {
10747 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10748 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10749 /* The size field comes from the bit mask. */
10750 neon_three_same (rs == NS_QQQ, 1, -1);
10751 }
10752
10753 static void
10754 do_neon_fcmp_absolute_inv (void)
10755 {
10756 neon_exchange_operands ();
10757 do_neon_fcmp_absolute ();
10758 }
10759
10760 static void
10761 do_neon_step (void)
10762 {
10763 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10764 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10765 neon_three_same (rs == NS_QQQ, 0, -1);
10766 }
10767
10768 static void
10769 do_neon_abs_neg (void)
10770 {
10771 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10772 struct neon_type_el et = neon_check_type (3, rs,
10773 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10774 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10775 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10776 inst.instruction |= LOW4 (inst.operands[1].reg);
10777 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10778 inst.instruction |= (rs == NS_QQ) << 6;
10779 inst.instruction |= (et.type == NT_float) << 10;
10780 inst.instruction |= neon_logbits (et.size) << 18;
10781
10782 inst.instruction = neon_dp_fixup (inst.instruction);
10783 }
10784
10785 static void
10786 do_neon_sli (void)
10787 {
10788 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10789 struct neon_type_el et = neon_check_type (2, rs,
10790 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10791 int imm = inst.operands[2].imm;
10792 constraint (imm < 0 || (unsigned)imm >= et.size,
10793 _("immediate out of range for insert"));
10794 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10795 }
10796
10797 static void
10798 do_neon_sri (void)
10799 {
10800 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10801 struct neon_type_el et = neon_check_type (2, rs,
10802 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10803 int imm = inst.operands[2].imm;
10804 constraint (imm < 1 || (unsigned)imm > et.size,
10805 _("immediate out of range for insert"));
10806 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10807 }
10808
10809 static void
10810 do_neon_qshlu_imm (void)
10811 {
10812 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10813 struct neon_type_el et = neon_check_type (2, rs,
10814 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10815 int imm = inst.operands[2].imm;
10816 constraint (imm < 0 || (unsigned)imm >= et.size,
10817 _("immediate out of range for shift"));
10818 /* Only encodes the 'U present' variant of the instruction.
10819 In this case, signed types have OP (bit 8) set to 0.
10820 Unsigned types have OP set to 1. */
10821 inst.instruction |= (et.type == NT_unsigned) << 8;
10822 /* The rest of the bits are the same as other immediate shifts. */
10823 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10824 }
10825
10826 static void
10827 do_neon_qmovn (void)
10828 {
10829 struct neon_type_el et = neon_check_type (2, NS_DQ,
10830 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10831 /* Saturating move where operands can be signed or unsigned, and the
10832 destination has the same signedness. */
10833 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10834 if (et.type == NT_unsigned)
10835 inst.instruction |= 0xc0;
10836 else
10837 inst.instruction |= 0x80;
10838 neon_two_same (0, 1, et.size / 2);
10839 }
10840
10841 static void
10842 do_neon_qmovun (void)
10843 {
10844 struct neon_type_el et = neon_check_type (2, NS_DQ,
10845 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10846 /* Saturating move with unsigned results. Operands must be signed. */
10847 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10848 neon_two_same (0, 1, et.size / 2);
10849 }
10850
10851 static void
10852 do_neon_rshift_sat_narrow (void)
10853 {
10854 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10855 or unsigned. If operands are unsigned, results must also be unsigned. */
10856 struct neon_type_el et = neon_check_type (2, NS_DQI,
10857 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10858 int imm = inst.operands[2].imm;
10859 /* This gets the bounds check, size encoding and immediate bits calculation
10860 right. */
10861 et.size /= 2;
10862
10863 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10864 VQMOVN.I<size> <Dd>, <Qm>. */
10865 if (imm == 0)
10866 {
10867 inst.operands[2].present = 0;
10868 inst.instruction = N_MNEM_vqmovn;
10869 do_neon_qmovn ();
10870 return;
10871 }
10872
10873 constraint (imm < 1 || (unsigned)imm > et.size,
10874 _("immediate out of range"));
10875 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10876 }
10877
10878 static void
10879 do_neon_rshift_sat_narrow_u (void)
10880 {
10881 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10882 or unsigned. If operands are unsigned, results must also be unsigned. */
10883 struct neon_type_el et = neon_check_type (2, NS_DQI,
10884 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10885 int imm = inst.operands[2].imm;
10886 /* This gets the bounds check, size encoding and immediate bits calculation
10887 right. */
10888 et.size /= 2;
10889
10890 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10891 VQMOVUN.I<size> <Dd>, <Qm>. */
10892 if (imm == 0)
10893 {
10894 inst.operands[2].present = 0;
10895 inst.instruction = N_MNEM_vqmovun;
10896 do_neon_qmovun ();
10897 return;
10898 }
10899
10900 constraint (imm < 1 || (unsigned)imm > et.size,
10901 _("immediate out of range"));
10902 /* FIXME: The manual is kind of unclear about what value U should have in
10903 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10904 must be 1. */
10905 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10906 }
10907
10908 static void
10909 do_neon_movn (void)
10910 {
10911 struct neon_type_el et = neon_check_type (2, NS_DQ,
10912 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10913 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10914 neon_two_same (0, 1, et.size / 2);
10915 }
10916
10917 static void
10918 do_neon_rshift_narrow (void)
10919 {
10920 struct neon_type_el et = neon_check_type (2, NS_DQI,
10921 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10922 int imm = inst.operands[2].imm;
10923 /* This gets the bounds check, size encoding and immediate bits calculation
10924 right. */
10925 et.size /= 2;
10926
10927 /* If the immediate is zero, this is a pseudo-instruction for
10928 VMOVN.I<size> <Dd>, <Qm>. */
10929 if (imm == 0)
10930 {
10931 inst.operands[2].present = 0;
10932 inst.instruction = N_MNEM_vmovn;
10933 do_neon_movn ();
10934 return;
10935 }
10936
10937 constraint (imm < 1 || (unsigned)imm > et.size,
10938 _("immediate out of range for narrowing operation"));
10939 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
10940 }
10941
10942 static void
10943 do_neon_shll (void)
10944 {
10945 /* FIXME: Type checking when lengthening. */
10946 struct neon_type_el et = neon_check_type (2, NS_QDI,
10947 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
10948 unsigned imm = inst.operands[2].imm;
10949
10950 if (imm == et.size)
10951 {
10952 /* Maximum shift variant. */
10953 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10954 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10955 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10956 inst.instruction |= LOW4 (inst.operands[1].reg);
10957 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10958 inst.instruction |= neon_logbits (et.size) << 18;
10959
10960 inst.instruction = neon_dp_fixup (inst.instruction);
10961 }
10962 else
10963 {
10964 /* A more-specific type check for non-max versions. */
10965 et = neon_check_type (2, NS_QDI,
10966 N_EQK | N_DBL, N_SU_32 | N_KEY);
10967 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10968 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
10969 }
10970 }
10971
10972 /* Check the various types for the VCVT instruction, and return the one that
10973 the current instruction is. */
10974
10975 static int
10976 neon_cvt_flavour (enum neon_shape rs)
10977 {
10978 #define CVT_VAR(C,X,Y) \
10979 et = neon_check_type (2, rs, (X), (Y)); \
10980 if (et.type != NT_invtype) \
10981 { \
10982 inst.error = NULL; \
10983 return (C); \
10984 }
10985 struct neon_type_el et;
10986
10987 CVT_VAR (0, N_S32, N_F32);
10988 CVT_VAR (1, N_U32, N_F32);
10989 CVT_VAR (2, N_F32, N_S32);
10990 CVT_VAR (3, N_F32, N_U32);
10991
10992 return -1;
10993 #undef CVT_VAR
10994 }
10995
10996 static void
10997 do_neon_cvt (void)
10998 {
10999 /* Fixed-point conversion with #0 immediate is encoded as an integer
11000 conversion. */
11001 if (inst.operands[2].present && inst.operands[2].imm != 0)
11002 {
11003 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11004 int flavour = neon_cvt_flavour (rs);
11005 unsigned immbits = 32 - inst.operands[2].imm;
11006 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11007 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11008 if (flavour != -1)
11009 inst.instruction |= enctab[flavour];
11010 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11011 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11012 inst.instruction |= LOW4 (inst.operands[1].reg);
11013 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11014 inst.instruction |= (rs == NS_QQI) << 6;
11015 inst.instruction |= 1 << 21;
11016 inst.instruction |= immbits << 16;
11017 }
11018 else
11019 {
11020 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11021 int flavour = neon_cvt_flavour (rs);
11022 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11023 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11024 if (flavour != -1)
11025 inst.instruction |= enctab[flavour];
11026 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11027 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11028 inst.instruction |= LOW4 (inst.operands[1].reg);
11029 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11030 inst.instruction |= (rs == NS_QQ) << 6;
11031 inst.instruction |= 2 << 18;
11032 }
11033 inst.instruction = neon_dp_fixup (inst.instruction);
11034 }
11035
11036 static void
11037 neon_move_immediate (void)
11038 {
11039 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11040 struct neon_type_el et = neon_check_type (1, rs,
11041 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11042 unsigned immlo, immhi = 0, immbits;
11043 int op, cmode;
11044
11045 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11046 op = (inst.instruction & (1 << 5)) != 0;
11047
11048 immlo = inst.operands[1].imm;
11049 if (inst.operands[1].regisimm)
11050 immhi = inst.operands[1].reg;
11051
11052 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11053 _("immediate has bits set outside the operand size"));
11054
11055 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11056 et.size, et.type)) == FAIL)
11057 {
11058 /* Invert relevant bits only. */
11059 neon_invert_size (&immlo, &immhi, et.size);
11060 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11061 with one or the other; those cases are caught by
11062 neon_cmode_for_move_imm. */
11063 op = !op;
11064 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11065 et.size, et.type)) == FAIL)
11066 {
11067 first_error (_("immediate out of range"));
11068 return;
11069 }
11070 }
11071
11072 inst.instruction &= ~(1 << 5);
11073 inst.instruction |= op << 5;
11074
11075 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11076 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11077 inst.instruction |= (rs == NS_QI) << 6;
11078 inst.instruction |= cmode << 8;
11079
11080 neon_write_immbits (immbits);
11081 }
11082
11083 static void
11084 do_neon_mvn (void)
11085 {
11086 if (inst.operands[1].isreg)
11087 {
11088 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11089
11090 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11091 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11092 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11093 inst.instruction |= LOW4 (inst.operands[1].reg);
11094 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11095 inst.instruction |= (rs == NS_QQ) << 6;
11096 }
11097 else
11098 {
11099 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11100 neon_move_immediate ();
11101 }
11102
11103 inst.instruction = neon_dp_fixup (inst.instruction);
11104 }
11105
11106 /* Encode instructions of form:
11107
11108 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11109 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11110
11111 */
11112
11113 static void
11114 neon_mixed_length (struct neon_type_el et, unsigned size)
11115 {
11116 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11117 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11118 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11119 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11120 inst.instruction |= LOW4 (inst.operands[2].reg);
11121 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11122 inst.instruction |= (et.type == NT_unsigned) << 24;
11123 inst.instruction |= neon_logbits (size) << 20;
11124
11125 inst.instruction = neon_dp_fixup (inst.instruction);
11126 }
11127
11128 static void
11129 do_neon_dyadic_long (void)
11130 {
11131 /* FIXME: Type checking for lengthening op. */
11132 struct neon_type_el et = neon_check_type (3, NS_QDD,
11133 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11134 neon_mixed_length (et, et.size);
11135 }
11136
11137 static void
11138 do_neon_abal (void)
11139 {
11140 struct neon_type_el et = neon_check_type (3, NS_QDD,
11141 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11142 neon_mixed_length (et, et.size);
11143 }
11144
11145 static void
11146 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11147 {
11148 if (inst.operands[2].isscalar)
11149 {
11150 struct neon_type_el et = neon_check_type (3, NS_QDS,
11151 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11152 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11153 neon_mul_mac (et, et.type == NT_unsigned);
11154 }
11155 else
11156 {
11157 struct neon_type_el et = neon_check_type (3, NS_QDD,
11158 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11159 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11160 neon_mixed_length (et, et.size);
11161 }
11162 }
11163
11164 static void
11165 do_neon_mac_maybe_scalar_long (void)
11166 {
11167 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11168 }
11169
11170 static void
11171 do_neon_dyadic_wide (void)
11172 {
11173 struct neon_type_el et = neon_check_type (3, NS_QQD,
11174 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11175 neon_mixed_length (et, et.size);
11176 }
11177
11178 static void
11179 do_neon_dyadic_narrow (void)
11180 {
11181 struct neon_type_el et = neon_check_type (3, NS_QDD,
11182 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11183 neon_mixed_length (et, et.size / 2);
11184 }
11185
11186 static void
11187 do_neon_mul_sat_scalar_long (void)
11188 {
11189 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11190 }
11191
11192 static void
11193 do_neon_vmull (void)
11194 {
11195 if (inst.operands[2].isscalar)
11196 do_neon_mac_maybe_scalar_long ();
11197 else
11198 {
11199 struct neon_type_el et = neon_check_type (3, NS_QDD,
11200 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11201 if (et.type == NT_poly)
11202 inst.instruction = NEON_ENC_POLY (inst.instruction);
11203 else
11204 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11205 /* For polynomial encoding, the size field must be 0b00 and the U bit must
11206 be zero. Should be OK as-is. */
11207 neon_mixed_length (et, et.size);
11208 }
11209 }
11210
11211 static void
11212 do_neon_ext (void)
11213 {
11214 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11215 struct neon_type_el et = neon_check_type (3, rs,
11216 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11217 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11218 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11219 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11220 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11221 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11222 inst.instruction |= LOW4 (inst.operands[2].reg);
11223 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11224 inst.instruction |= (rs == NS_QQQI) << 6;
11225 inst.instruction |= imm << 8;
11226
11227 inst.instruction = neon_dp_fixup (inst.instruction);
11228 }
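/* E.g. (illustrative) "vext.32 d0, d1, d2, #1" has et.size == 32, so the byte
   offset written to bits [11:8] is (1 * 32) / 8 == 4.  */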
11229
11230 static void
11231 do_neon_rev (void)
11232 {
11233 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11234 struct neon_type_el et = neon_check_type (2, rs,
11235 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11236 unsigned op = (inst.instruction >> 7) & 3;
11237 /* N (width of reversed regions) is encoded as part of the bitmask. We
11238 extract it here to check that the elements to be reversed are smaller;
11239 otherwise we'd get a reserved instruction. */
11240 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11241 assert (elsize != 0);
11242 constraint (et.size >= elsize,
11243 _("elements must be smaller than reversal region"));
11244 neon_two_same (rs == NS_QQ, 1, et.size);
11245 }
11246
11247 static void
11248 do_neon_dup (void)
11249 {
11250 if (inst.operands[1].isscalar)
11251 {
11252 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11253 struct neon_type_el et = neon_check_type (2, rs,
11254 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11255 unsigned sizebits = et.size >> 3;
11256 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11257 int logsize = neon_logbits (et.size);
11258 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11259 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11260 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11261 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11262 inst.instruction |= LOW4 (dm);
11263 inst.instruction |= HI1 (dm) << 5;
11264 inst.instruction |= (rs == NS_QS) << 6;
11265 inst.instruction |= x << 17;
11266 inst.instruction |= sizebits << 16;
11267
11268 inst.instruction = neon_dp_fixup (inst.instruction);
11269 }
11270 else
11271 {
11272 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11273 struct neon_type_el et = neon_check_type (1, rs,
11274 N_8 | N_16 | N_32 | N_KEY);
11275 unsigned save_cond = inst.instruction & 0xf0000000;
11276 /* Duplicate ARM register to lanes of vector. */
11277 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11278 switch (et.size)
11279 {
11280 case 8: inst.instruction |= 0x400000; break;
11281 case 16: inst.instruction |= 0x000020; break;
11282 case 32: inst.instruction |= 0x000000; break;
11283 default: break;
11284 }
11285 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11286 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11287 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11288 inst.instruction |= (rs == NS_QR) << 21;
11289 /* The encoding for this instruction is identical for the ARM and Thumb
11290 variants, except for the condition field. */
11291 if (thumb_mode)
11292 inst.instruction |= 0xe0000000;
11293 else
11294 inst.instruction |= save_cond;
11295 }
11296 }
11297
11298 /* VMOV has particularly many variations. It can be one of:
11299 0. VMOV<c><q> <Qd>, <Qm>
11300 1. VMOV<c><q> <Dd>, <Dm>
11301 (Register operations, which are VORR with Rm = Rn.)
11302 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11303 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11304 (Immediate loads.)
11305 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11306 (ARM register to scalar.)
11307 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11308 (Two ARM registers to vector.)
11309 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11310 (Scalar to ARM register.)
11311 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11312 (Vector to two ARM registers.)
11313
11314 We should have just enough information to be able to disambiguate most of
11315 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11316 registers" cases. For these, abuse the .regisimm operand field to signify a
11317 Neon register.
11318
11319 All the encoded bits are hardcoded by this function.
11320
11321 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
11322 Cases 5, 7 may be used with VFPv2 and above.
11323
11324 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11325 can specify a type where it doesn't make sense to, and is ignored).
11326 */
11327
11328 static void
11329 do_neon_mov (void)
11330 {
11331 int nargs = inst.operands[0].present + inst.operands[1].present
11332 + inst.operands[2].present;
11333 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11334 const char *vfp_vers = "selected FPU does not support instruction";
11335
11336 switch (nargs)
11337 {
11338 case 2:
11339 /* Cases 0, 1, 2, 3, 4, 6. */
11340 if (inst.operands[1].isscalar)
11341 {
11342 /* Case 6. */
11343 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11344 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11345 unsigned logsize = neon_logbits (et.size);
11346 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11347 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11348 unsigned abcdebits = 0;
11349
11350 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11351 _(vfp_vers));
11352 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11353 && et.size != 32, _(vfp_vers));
11354 constraint (et.type == NT_invtype, _("bad type for scalar"));
11355 constraint (x >= 64 / et.size, _("scalar index out of range"));
11356
11357 switch (et.size)
11358 {
11359 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11360 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11361 case 32: abcdebits = 0x00; break;
11362 default: ;
11363 }
11364
11365 abcdebits |= x << logsize;
11366 inst.instruction = save_cond;
11367 inst.instruction |= 0xe100b10;
11368 inst.instruction |= LOW4 (dn) << 16;
11369 inst.instruction |= HI1 (dn) << 7;
11370 inst.instruction |= inst.operands[0].reg << 12;
11371 inst.instruction |= (abcdebits & 3) << 5;
11372 inst.instruction |= (abcdebits >> 2) << 21;
11373 }
11374 else if (inst.operands[1].isreg)
11375 {
11376 /* Cases 0, 1, 4. */
11377 if (inst.operands[0].isscalar)
11378 {
11379 /* Case 4. */
11380 unsigned bcdebits = 0;
11381 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11382 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11383 int logsize = neon_logbits (et.size);
11384 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11385 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11386
11387 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11388 _(vfp_vers));
11389 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11390 && et.size != 32, _(vfp_vers));
11391 constraint (et.type == NT_invtype, _("bad type for scalar"));
11392 constraint (x >= 64 / et.size, _("scalar index out of range"));
11393
11394 switch (et.size)
11395 {
11396 case 8: bcdebits = 0x8; break;
11397 case 16: bcdebits = 0x1; break;
11398 case 32: bcdebits = 0x0; break;
11399 default: ;
11400 }
11401
11402 bcdebits |= x << logsize;
11403 inst.instruction = save_cond;
11404 inst.instruction |= 0xe000b10;
11405 inst.instruction |= LOW4 (dn) << 16;
11406 inst.instruction |= HI1 (dn) << 7;
11407 inst.instruction |= inst.operands[1].reg << 12;
11408 inst.instruction |= (bcdebits & 3) << 5;
11409 inst.instruction |= (bcdebits >> 2) << 21;
11410 }
11411 else
11412 {
11413 /* Cases 0, 1. */
11414 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11415 /* The architecture manual I have doesn't explicitly state which
11416 value the U bit should have for register->register moves, but
11417 the equivalent VORR instruction has U = 0, so do that. */
11418 inst.instruction = 0x0200110;
11419 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11420 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11421 inst.instruction |= LOW4 (inst.operands[1].reg);
11422 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11423 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11424 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11425 inst.instruction |= (rs == NS_QQ) << 6;
11426
11427 inst.instruction = neon_dp_fixup (inst.instruction);
11428 }
11429 }
11430 else
11431 {
11432 /* Cases 2, 3. */
11433 inst.instruction = 0x0800010;
11434 neon_move_immediate ();
11435 inst.instruction = neon_dp_fixup (inst.instruction);
11436 }
11437 break;
11438
11439 case 3:
11440 /* Cases 5, 7. */
11441 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
11442 _(vfp_vers));
11443
11444 if (inst.operands[0].regisimm)
11445 {
11446 /* Case 5. */
11447 inst.instruction = save_cond;
11448 inst.instruction |= 0xc400b10;
11449 inst.instruction |= LOW4 (inst.operands[0].reg);
11450 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11451 inst.instruction |= inst.operands[1].reg << 12;
11452 inst.instruction |= inst.operands[2].reg << 16;
11453 }
11454 else
11455 {
11456 /* Case 7. */
11457 inst.instruction = save_cond;
11458 inst.instruction |= 0xc500b10;
11459 inst.instruction |= inst.operands[0].reg << 12;
11460 inst.instruction |= inst.operands[1].reg << 16;
11461 inst.instruction |= LOW4 (inst.operands[2].reg);
11462 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11463 }
11464 break;
11465
11466 default:
11467 abort ();
11468 }
11469 }
11470
11471 static void
11472 do_neon_rshift_round_imm (void)
11473 {
11474 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11475 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11476 int imm = inst.operands[2].imm;
11477
11478 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11479 if (imm == 0)
11480 {
11481 inst.operands[2].present = 0;
11482 do_neon_mov ();
11483 return;
11484 }
11485
11486 constraint (imm < 1 || (unsigned)imm > et.size,
11487 _("immediate out of range for shift"));
11488 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11489 et.size - imm);
11490 }
11491
11492 static void
11493 do_neon_movl (void)
11494 {
11495 struct neon_type_el et = neon_check_type (2, NS_QD,
11496 N_EQK | N_DBL, N_SU_32 | N_KEY);
11497 unsigned sizebits = et.size >> 3;
11498 inst.instruction |= sizebits << 19;
11499 neon_two_same (0, et.type == NT_unsigned, -1);
11500 }
11501
11502 static void
11503 do_neon_trn (void)
11504 {
11505 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11506 struct neon_type_el et = neon_check_type (2, rs,
11507 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11508 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11509 neon_two_same (rs == NS_QQ, 1, et.size);
11510 }
11511
11512 static void
11513 do_neon_zip_uzp (void)
11514 {
11515 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11516 struct neon_type_el et = neon_check_type (2, rs,
11517 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11518 if (rs == NS_DD && et.size == 32)
11519 {
11520 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11521 inst.instruction = N_MNEM_vtrn;
11522 do_neon_trn ();
11523 return;
11524 }
11525 neon_two_same (rs == NS_QQ, 1, et.size);
11526 }
11527
11528 static void
11529 do_neon_sat_abs_neg (void)
11530 {
11531 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11532 struct neon_type_el et = neon_check_type (2, rs,
11533 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11534 neon_two_same (rs == NS_QQ, 1, et.size);
11535 }
11536
11537 static void
11538 do_neon_pair_long (void)
11539 {
11540 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11541 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11542 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
11543 inst.instruction |= (et.type == NT_unsigned) << 7;
11544 neon_two_same (rs == NS_QQ, 1, et.size);
11545 }
11546
11547 static void
11548 do_neon_recip_est (void)
11549 {
11550 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11551 struct neon_type_el et = neon_check_type (2, rs,
11552 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11553 inst.instruction |= (et.type == NT_float) << 8;
11554 neon_two_same (rs == NS_QQ, 1, et.size);
11555 }
11556
11557 static void
11558 do_neon_cls (void)
11559 {
11560 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11561 struct neon_type_el et = neon_check_type (2, rs,
11562 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11563 neon_two_same (rs == NS_QQ, 1, et.size);
11564 }
11565
11566 static void
11567 do_neon_clz (void)
11568 {
11569 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11570 struct neon_type_el et = neon_check_type (2, rs,
11571 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11572 neon_two_same (rs == NS_QQ, 1, et.size);
11573 }
11574
11575 static void
11576 do_neon_cnt (void)
11577 {
11578 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11579 struct neon_type_el et = neon_check_type (2, rs,
11580 N_EQK | N_INT, N_8 | N_KEY);
11581 neon_two_same (rs == NS_QQ, 1, et.size);
11582 }
11583
11584 static void
11585 do_neon_swp (void)
11586 {
11587 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11588 neon_two_same (rs == NS_QQ, 1, -1);
11589 }
11590
11591 static void
11592 do_neon_tbl_tbx (void)
11593 {
11594 unsigned listlenbits;
11595 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11596
11597 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11598 {
11599 first_error (_("bad list length for table lookup"));
11600 return;
11601 }
11602
11603 listlenbits = inst.operands[1].imm - 1;
11604 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11605 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11606 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11607 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11608 inst.instruction |= LOW4 (inst.operands[2].reg);
11609 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11610 inst.instruction |= listlenbits << 8;
11611
11612 inst.instruction = neon_dp_fixup (inst.instruction);
11613 }
11614
11615 static void
11616 do_neon_ldm_stm (void)
11617 {
11618 /* The P, U and L bits are part of the bitmask. */
11619 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11620 unsigned offsetbits = inst.operands[1].imm * 2;
11621
11622 constraint (is_dbmode && !inst.operands[0].writeback,
11623 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11624
11625 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11626 _("register list must contain at least 1 and at most 16 "
11627 "registers"));
11628
11629 inst.instruction |= inst.operands[0].reg << 16;
11630 inst.instruction |= inst.operands[0].writeback << 21;
11631 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11632 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11633
11634 inst.instruction |= offsetbits;
11635
11636 if (thumb_mode)
11637 inst.instruction |= 0xe0000000;
11638 }
11639
11640 static void
11641 do_neon_ldr_str (void)
11642 {
11643 unsigned offsetbits;
11644 int offset_up = 1;
11645 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11646
11647 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11648 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11649
11650 constraint (inst.reloc.pc_rel && !is_ldr,
11651 _("PC-relative addressing unavailable with VSTR"));
11652
11653 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11654 _("Immediate value must be a constant"));
11655
11656 if (inst.reloc.exp.X_add_number < 0)
11657 {
11658 offset_up = 0;
11659 offsetbits = -inst.reloc.exp.X_add_number / 4;
11660 }
11661 else
11662 offsetbits = inst.reloc.exp.X_add_number / 4;
11663
11664 /* FIXME: Does this catch everything? */
11665 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11666 || inst.operands[1].postind || inst.operands[1].writeback
11667 || inst.operands[1].immisreg || inst.operands[1].shifted,
11668 BAD_ADDR_MODE);
11669 constraint ((inst.operands[1].imm & 3) != 0,
11670 _("Offset must be a multiple of 4"));
11671 constraint (offsetbits != (offsetbits & 0xff),
11672 _("Immediate offset out of range"));
11673
11674 inst.instruction |= inst.operands[1].reg << 16;
11675 inst.instruction |= offsetbits & 0xff;
11676 inst.instruction |= offset_up << 23;
11677
11678 if (thumb_mode)
11679 inst.instruction |= 0xe0000000;
11680
11681 if (inst.reloc.pc_rel)
11682 {
11683 if (thumb_mode)
11684 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11685 else
11686 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11687 }
11688 else
11689 inst.reloc.type = BFD_RELOC_UNUSED;
11690 }
11691
11692 /* "interleave" version also handles non-interleaving register VLD1/VST1
11693 instructions. */
11694
11695 static void
11696 do_neon_ld_st_interleave (void)
11697 {
11698 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11699 N_8 | N_16 | N_32 | N_64);
11700 unsigned alignbits = 0;
11701 unsigned idx;
11702 /* The bits in this table go:
11703 0: register stride of one (0) or two (1)
11704 1,2: register list length, minus one (1, 2, 3, 4).
11705 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11706 We use -1 for invalid entries. */
11707 const int typetable[] =
11708 {
11709 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11710 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11711 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11712 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11713 };
11714 int typebits;
11715
11716 if (et.type == NT_invtype)
11717 return;
11718
11719 if (inst.operands[1].immisalign)
11720 switch (inst.operands[1].imm >> 8)
11721 {
11722 case 64: alignbits = 1; break;
11723 case 128:
11724 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11725 goto bad_alignment;
11726 alignbits = 2;
11727 break;
11728 case 256:
11729 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11730 goto bad_alignment;
11731 alignbits = 3;
11732 break;
11733 default:
11734 bad_alignment:
11735 first_error (_("bad alignment"));
11736 return;
11737 }
11738
11739 inst.instruction |= alignbits << 4;
11740 inst.instruction |= neon_logbits (et.size) << 6;
11741
11742 /* Bits [4:6] of the immediate in a list specifier encode register stride
11743 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11744 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11745 up the right value for "type" in a table based on this value and the given
11746 list style, then stick it back. */
11747 idx = ((inst.operands[0].imm >> 4) & 7)
11748 | (((inst.instruction >> 8) & 3) << 3);
11749
11750 typebits = typetable[idx];
11751
11752 constraint (typebits == -1, _("bad list type for instruction"));
11753
11754 inst.instruction &= ~0xf00;
11755 inst.instruction |= typebits << 8;
11756 }
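/* Worked example (illustrative): a VLD2 of two registers with stride one has
   ((inst.operands[0].imm >> 4) & 7) == 2 (list length minus one in bits [2:1])
   and <n> minus one == 1 in bits [9:8] of the bitmask, so idx == 10 and
   typebits == 0x8 for that register arrangement.  */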
11757
11758 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11759 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11760 otherwise. The variable arguments are a list of pairs of legal (size, align)
11761 values, terminated with -1. */
11762
11763 static int
11764 neon_alignment_bit (int size, int align, int *do_align, ...)
11765 {
11766 va_list ap;
11767 int result = FAIL, thissize, thisalign;
11768
11769 if (!inst.operands[1].immisalign)
11770 {
11771 *do_align = 0;
11772 return SUCCESS;
11773 }
11774
11775 va_start (ap, do_align);
11776
11777 do
11778 {
11779 thissize = va_arg (ap, int);
11780 if (thissize == -1)
11781 break;
11782 thisalign = va_arg (ap, int);
11783
11784 if (size == thissize && align == thisalign)
11785 result = SUCCESS;
11786 }
11787 while (result != SUCCESS);
11788
11789 va_end (ap);
11790
11791 if (result == SUCCESS)
11792 *do_align = 1;
11793 else
11794 first_error (_("unsupported alignment for instruction"));
11795
11796 return result;
11797 }
11798
11799 static void
11800 do_neon_ld_st_lane (void)
11801 {
11802 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11803 int align_good, do_align = 0;
11804 int logsize = neon_logbits (et.size);
11805 int align = inst.operands[1].imm >> 8;
11806 int n = (inst.instruction >> 8) & 3;
11807 int max_el = 64 / et.size;
11808
11809 if (et.type == NT_invtype)
11810 return;
11811
11812 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11813 _("bad list length"));
11814 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11815 _("scalar index out of range"));
11816 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11817 && et.size == 8,
11818 _("stride of 2 unavailable when element size is 8"));
11819
11820 switch (n)
11821 {
11822 case 0: /* VLD1 / VST1. */
11823 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11824 32, 32, -1);
11825 if (align_good == FAIL)
11826 return;
11827 if (do_align)
11828 {
11829 unsigned alignbits = 0;
11830 switch (et.size)
11831 {
11832 case 16: alignbits = 0x1; break;
11833 case 32: alignbits = 0x3; break;
11834 default: ;
11835 }
11836 inst.instruction |= alignbits << 4;
11837 }
11838 break;
11839
11840 case 1: /* VLD2 / VST2. */
11841 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11842 32, 64, -1);
11843 if (align_good == FAIL)
11844 return;
11845 if (do_align)
11846 inst.instruction |= 1 << 4;
11847 break;
11848
11849 case 2: /* VLD3 / VST3. */
11850 constraint (inst.operands[1].immisalign,
11851 _("can't use alignment with this instruction"));
11852 break;
11853
11854 case 3: /* VLD4 / VST4. */
11855 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11856 16, 64, 32, 64, 32, 128, -1);
11857 if (align_good == FAIL)
11858 return;
11859 if (do_align)
11860 {
11861 unsigned alignbits = 0;
11862 switch (et.size)
11863 {
11864 case 8: alignbits = 0x1; break;
11865 case 16: alignbits = 0x1; break;
11866 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11867 default: ;
11868 }
11869 inst.instruction |= alignbits << 4;
11870 }
11871 break;
11872
11873 default: ;
11874 }
11875
11876 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11877 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11878 inst.instruction |= 1 << (4 + logsize);
11879
11880 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11881 inst.instruction |= logsize << 10;
11882 }
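/* Rough worked example (following the fields set above): for
   "vld1.32 {d0[1]}, [r0:32]" we have et.size == 32, so logsize == 2; the
   VLD1 case accepts a 32-bit alignment and ORs alignbits 0x3 into bits [5:4];
   the lane index 1 lands at bit (logsize + 5) == 7; and the size field
   (logsize == 2) goes into bits [11:10].  */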
11883
11884 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11885
11886 static void
11887 do_neon_ld_dup (void)
11888 {
11889 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11890 int align_good, do_align = 0;
11891
11892 if (et.type == NT_invtype)
11893 return;
11894
11895 switch ((inst.instruction >> 8) & 3)
11896 {
11897 case 0: /* VLD1. */
11898 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11899 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11900 &do_align, 16, 16, 32, 32, -1);
11901 if (align_good == FAIL)
11902 return;
11903 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11904 {
11905 case 1: break;
11906 case 2: inst.instruction |= 1 << 5; break;
11907 default: first_error (_("bad list length")); return;
11908 }
11909 inst.instruction |= neon_logbits (et.size) << 6;
11910 break;
11911
11912 case 1: /* VLD2. */
11913 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11914 &do_align, 8, 16, 16, 32, 32, 64, -1);
11915 if (align_good == FAIL)
11916 return;
11917 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11918 _("bad list length"));
11919 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11920 inst.instruction |= 1 << 5;
11921 inst.instruction |= neon_logbits (et.size) << 6;
11922 break;
11923
11924 case 2: /* VLD3. */
11925 constraint (inst.operands[1].immisalign,
11926 _("can't use alignment with this instruction"));
11927 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
11928 _("bad list length"));
11929 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11930 inst.instruction |= 1 << 5;
11931 inst.instruction |= neon_logbits (et.size) << 6;
11932 break;
11933
11934 case 3: /* VLD4. */
11935 {
11936 int align = inst.operands[1].imm >> 8;
11937 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11938 16, 64, 32, 64, 32, 128, -1);
11939 if (align_good == FAIL)
11940 return;
11941 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
11942 _("bad list length"));
11943 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11944 inst.instruction |= 1 << 5;
11945 if (et.size == 32 && align == 128)
11946 inst.instruction |= 0x3 << 6;
11947 else
11948 inst.instruction |= neon_logbits (et.size) << 6;
11949 }
11950 break;
11951
11952 default: ;
11953 }
11954
11955 inst.instruction |= do_align << 4;
11956 }
11957
11958 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
11959    apart from bits [11:4]).  */
11960
11961 static void
11962 do_neon_ldx_stx (void)
11963 {
11964 switch (NEON_LANE (inst.operands[0].imm))
11965 {
11966 case NEON_INTERLEAVE_LANES:
11967 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
11968 do_neon_ld_st_interleave ();
11969 break;
11970
11971 case NEON_ALL_LANES:
11972 inst.instruction = NEON_ENC_DUP (inst.instruction);
11973 do_neon_ld_dup ();
11974 break;
11975
11976 default:
11977 inst.instruction = NEON_ENC_LANE (inst.instruction);
11978 do_neon_ld_st_lane ();
11979 }
11980
11981 /* L bit comes from bit mask. */
11982 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11983 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11984 inst.instruction |= inst.operands[1].reg << 16;
11985
11986 if (inst.operands[1].postind)
11987 {
11988 int postreg = inst.operands[1].imm & 0xf;
11989 constraint (!inst.operands[1].immisreg,
11990 _("post-index must be a register"));
11991 constraint (postreg == 0xd || postreg == 0xf,
11992 _("bad register for post-index"));
11993 inst.instruction |= postreg;
11994 }
11995 else if (inst.operands[1].writeback)
11996 {
11997 inst.instruction |= 0xd;
11998 }
11999 else
12000 inst.instruction |= 0xf;
12001
12002 if (thumb_mode)
12003 inst.instruction |= 0xf9000000;
12004 else
12005 inst.instruction |= 0xf4000000;
12006 }
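/* The Rm field (bits [3:0]) set above distinguishes the addressing modes:
   e.g. "vld1.8 {d0}, [r0]" leaves Rm as 0xf (no writeback),
   "vld1.8 {d0}, [r0]!" uses 0xd (writeback, no index register), and
   "vld1.8 {d0}, [r0], r2" puts the post-index register (2) in Rm, with r13
   and r15 rejected as post-index registers.  */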
12007
12008 \f
12009 /* Overall per-instruction processing. */
12010
12011 /* We need to be able to fix up arbitrary expressions in some statements.
12012 This is so that we can handle symbols that are an arbitrary distance from
12013 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12014 which returns part of an address in a form which will be valid for
12015 a data instruction. We do this by pushing the expression into a symbol
12016 in the expr_section, and creating a fix for that. */
12017
12018 static void
12019 fix_new_arm (fragS * frag,
12020 int where,
12021 short int size,
12022 expressionS * exp,
12023 int pc_rel,
12024 int reloc)
12025 {
12026 fixS * new_fix;
12027
12028 switch (exp->X_op)
12029 {
12030 case O_constant:
12031 case O_symbol:
12032 case O_add:
12033 case O_subtract:
12034 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12035 break;
12036
12037 default:
12038 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12039 pc_rel, reloc);
12040 break;
12041 }
12042
12043 /* Mark whether the fix is to a THUMB instruction, or an ARM
12044 instruction. */
12045 new_fix->tc_fix_data = thumb_mode;
12046 }
12047
12048 /* Create a frag for an instruction requiring relaxation.  */
12049 static void
12050 output_relax_insn (void)
12051 {
12052 char * to;
12053 symbolS *sym;
12054 int offset;
12055
12056 #ifdef OBJ_ELF
12057 /* The size of the instruction is unknown, so tie the debug info to the
12058 start of the instruction. */
12059 dwarf2_emit_insn (0);
12060 #endif
12061
12062 switch (inst.reloc.exp.X_op)
12063 {
12064 case O_symbol:
12065 sym = inst.reloc.exp.X_add_symbol;
12066 offset = inst.reloc.exp.X_add_number;
12067 break;
12068 case O_constant:
12069 sym = NULL;
12070 offset = inst.reloc.exp.X_add_number;
12071 break;
12072 default:
12073 sym = make_expr_symbol (&inst.reloc.exp);
12074 offset = 0;
12075 break;
12076 }
12077 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12078 inst.relax, sym, offset, NULL/*offset, opcode*/);
12079 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12080 }
12081
12082 /* Write a 32-bit thumb instruction to buf. */
12083 static void
12084 put_thumb32_insn (char * buf, unsigned long insn)
12085 {
12086 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12087 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12088 }
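/* For example, a 32-bit Thumb BL encoding such as 0xf000f800 is emitted as
   the halfword 0xf000 followed by the halfword 0xf800, each converted to
   target byte order by md_number_to_chars.  */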
12089
12090 static void
12091 output_inst (const char * str)
12092 {
12093 char * to = NULL;
12094
12095 if (inst.error)
12096 {
12097 as_bad ("%s -- `%s'", inst.error, str);
12098 return;
12099 }
12100 if (inst.relax) {
12101 output_relax_insn();
12102 return;
12103 }
12104 if (inst.size == 0)
12105 return;
12106
12107 to = frag_more (inst.size);
12108
12109 if (thumb_mode && (inst.size > THUMB_SIZE))
12110 {
12111 assert (inst.size == (2 * THUMB_SIZE));
12112 put_thumb32_insn (to, inst.instruction);
12113 }
12114 else if (inst.size > INSN_SIZE)
12115 {
12116 assert (inst.size == (2 * INSN_SIZE));
12117 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12118 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12119 }
12120 else
12121 md_number_to_chars (to, inst.instruction, inst.size);
12122
12123 if (inst.reloc.type != BFD_RELOC_UNUSED)
12124 fix_new_arm (frag_now, to - frag_now->fr_literal,
12125 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12126 inst.reloc.type);
12127
12128 #ifdef OBJ_ELF
12129 dwarf2_emit_insn (inst.size);
12130 #endif
12131 }
12132
12133 /* Tag values used in struct asm_opcode's tag field. */
12134 enum opcode_tag
12135 {
12136 OT_unconditional, /* Instruction cannot be conditionalized.
12137 The ARM condition field is still 0xE. */
12138 OT_unconditionalF, /* Instruction cannot be conditionalized
12139 and carries 0xF in its ARM condition field. */
12140 OT_csuffix, /* Instruction takes a conditional suffix. */
12141 OT_cinfix3, /* Instruction takes a conditional infix,
12142 beginning at character index 3. (In
12143 unified mode, it becomes a suffix.) */
12144 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
12145 tsts, cmps, cmns, and teqs. */
12146 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12147 character index 3, even in unified mode. Used for
12148 legacy instructions where suffix and infix forms
12149 may be ambiguous. */
12150 OT_csuf_or_in3, /* Instruction takes either a conditional
12151 suffix or an infix at character index 3. */
12152 OT_odd_infix_unc, /* This is the unconditional variant of an
12153 instruction that takes a conditional infix
12154 at an unusual position. In unified mode,
12155 this variant will accept a suffix. */
12156 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12157 are the conditional variants of instructions that
12158 take conditional infixes in unusual positions.
12159 The infix appears at character index
12160 (tag - OT_odd_infix_0). These are not accepted
12161 in unified mode. */
12162 };
12163
12164 /* Subroutine of md_assemble, responsible for looking up the primary
12165 opcode from the mnemonic the user wrote. STR points to the
12166 beginning of the mnemonic.
12167
12168 This is not simply a hash table lookup, because of conditional
12169 variants. Most instructions have conditional variants, which are
12170 expressed with a _conditional affix_ to the mnemonic. If we were
12171 to encode each conditional variant as a literal string in the opcode
12172 table, it would have approximately 20,000 entries.
12173
12174 Most mnemonics take this affix as a suffix, and in unified syntax,
12175 'most' is upgraded to 'all'. However, in the divided syntax, some
12176 instructions take the affix as an infix, notably the s-variants of
12177 the arithmetic instructions. Of those instructions, all but six
12178 have the infix appear after the third character of the mnemonic.
12179
12180 Accordingly, the algorithm for looking up primary opcodes given
12181 an identifier is:
12182
12183 1. Look up the identifier in the opcode table.
12184 If we find a match, go to step U.
12185
12186 2. Look up the last two characters of the identifier in the
12187 conditions table. If we find a match, look up the first N-2
12188 characters of the identifier in the opcode table. If we
12189 find a match, go to step CE.
12190
12191 3. Look up the fourth and fifth characters of the identifier in
12192 the conditions table. If we find a match, extract those
12193 characters from the identifier, and look up the remaining
12194 characters in the opcode table. If we find a match, go
12195 to step CM.
12196
12197 4. Fail.
12198
12199 U. Examine the tag field of the opcode structure, in case this is
12200 one of the six instructions with its conditional infix in an
12201 unusual place. If it is, the tag tells us where to find the
12202 infix; look it up in the conditions table and set inst.cond
12203 accordingly. Otherwise, this is an unconditional instruction.
12204 Again set inst.cond accordingly. Return the opcode structure.
12205
12206 CE. Examine the tag field to make sure this is an instruction that
12207 should receive a conditional suffix. If it is not, fail.
12208 Otherwise, set inst.cond from the suffix we already looked up,
12209 and return the opcode structure.
12210
12211 CM. Examine the tag field to make sure this is an instruction that
12212 should receive a conditional infix after the third character.
12213 If it is not, fail. Otherwise, undo the edits to the current
12214 line of input and proceed as for case CE. */
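/* For instance (using mnemonics from the table below): "addeq" resolves via
   step CE (base "add" plus suffix "eq"); "ldreqsh" resolves via step CM
   (infix "eq" after the third character of "ldrsh"); and "smulleqs" or
   "ldeqsh" resolve via step U's odd-infix handling, since every such variant
   is a literal table entry whose tag records the infix position.  */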
12215
12216 static const struct asm_opcode *
12217 opcode_lookup (char **str)
12218 {
12219 char *end, *base;
12220 char *affix;
12221 const struct asm_opcode *opcode;
12222 const struct asm_cond *cond;
12223 char save[2];
12224
12225 /* Scan up to the end of the mnemonic, which must end in white space,
12226 '.' (in unified mode only), or end of string. */
12227 for (base = end = *str; *end != '\0'; end++)
12228 if (*end == ' ' || (unified_syntax && *end == '.'))
12229 break;
12230
12231 if (end == base)
12232 return 0;
12233
12234 /* Handle a possible width suffix and/or Neon type suffix. */
12235 if (end[0] == '.')
12236 {
12237 int offset = 2;
12238
12239 if (end[1] == 'w')
12240 inst.size_req = 4;
12241 else if (end[1] == 'n')
12242 inst.size_req = 2;
12243 else
12244 offset = 0;
12245
12246 inst.vectype.elems = 0;
12247
12248 *str = end + offset;
12249
12250 if (end[offset] == '.')
12251 {
12252 /* See if we have a Neon type suffix. */
12253 if (parse_neon_type (&inst.vectype, str) == FAIL)
12254 return 0;
12255 }
12256 else if (end[offset] != '\0' && end[offset] != ' ')
12257 return 0;
12258 }
12259 else
12260 *str = end;
12261
12262 /* Look for unaffixed or special-case affixed mnemonic. */
12263 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12264 if (opcode)
12265 {
12266 /* step U */
12267 if (opcode->tag < OT_odd_infix_0)
12268 {
12269 inst.cond = COND_ALWAYS;
12270 return opcode;
12271 }
12272
12273 if (unified_syntax)
12274 as_warn (_("conditional infixes are deprecated in unified syntax"));
12275 affix = base + (opcode->tag - OT_odd_infix_0);
12276 cond = hash_find_n (arm_cond_hsh, affix, 2);
12277 assert (cond);
12278
12279 inst.cond = cond->value;
12280 return opcode;
12281 }
12282
12283   /* Cannot have a conditional suffix on a mnemonic of less than three
12284      characters (one base character plus the two-character suffix).  */
12285 if (end - base < 3)
12286 return 0;
12287
12288 /* Look for suffixed mnemonic. */
12289 affix = end - 2;
12290 cond = hash_find_n (arm_cond_hsh, affix, 2);
12291 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12292 if (opcode && cond)
12293 {
12294 /* step CE */
12295 switch (opcode->tag)
12296 {
12297 case OT_cinfix3_legacy:
12298 /* Ignore conditional suffixes matched on infix only mnemonics. */
12299 break;
12300
12301 case OT_cinfix3:
12302 case OT_cinfix3_deprecated:
12303 case OT_odd_infix_unc:
12304 if (!unified_syntax)
12305 return 0;
12306 /* else fall through */
12307
12308 case OT_csuffix:
12309 case OT_csuf_or_in3:
12310 inst.cond = cond->value;
12311 return opcode;
12312
12313 case OT_unconditional:
12314 case OT_unconditionalF:
12315 if (thumb_mode)
12316 {
12317 inst.cond = cond->value;
12318 }
12319 else
12320 {
12321 /* delayed diagnostic */
12322 inst.error = BAD_COND;
12323 inst.cond = COND_ALWAYS;
12324 }
12325 return opcode;
12326
12327 default:
12328 return 0;
12329 }
12330 }
12331
12332 /* Cannot have a usual-position infix on a mnemonic of less than
12333 six characters (five would be a suffix). */
12334 if (end - base < 6)
12335 return 0;
12336
12337 /* Look for infixed mnemonic in the usual position. */
12338 affix = base + 3;
12339 cond = hash_find_n (arm_cond_hsh, affix, 2);
12340 if (!cond)
12341 return 0;
12342
12343 memcpy (save, affix, 2);
12344 memmove (affix, affix + 2, (end - affix) - 2);
12345 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12346 memmove (affix + 2, affix, (end - affix) - 2);
12347 memcpy (affix, save, 2);
12348
12349 if (opcode
12350 && (opcode->tag == OT_cinfix3
12351 || opcode->tag == OT_cinfix3_deprecated
12352 || opcode->tag == OT_csuf_or_in3
12353 || opcode->tag == OT_cinfix3_legacy))
12354 {
12355 /* step CM */
12356 if (unified_syntax
12357 && (opcode->tag == OT_cinfix3
12358 || opcode->tag == OT_cinfix3_deprecated))
12359 as_warn (_("conditional infixes are deprecated in unified syntax"));
12360
12361 inst.cond = cond->value;
12362 return opcode;
12363 }
12364
12365 return 0;
12366 }
12367
12368 void
12369 md_assemble (char *str)
12370 {
12371 char *p = str;
12372 const struct asm_opcode * opcode;
12373
12374 /* Align the previous label if needed. */
12375 if (last_label_seen != NULL)
12376 {
12377 symbol_set_frag (last_label_seen, frag_now);
12378 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12379 S_SET_SEGMENT (last_label_seen, now_seg);
12380 }
12381
12382 memset (&inst, '\0', sizeof (inst));
12383 inst.reloc.type = BFD_RELOC_UNUSED;
12384
12385 opcode = opcode_lookup (&p);
12386 if (!opcode)
12387 {
12388 /* It wasn't an instruction, but it might be a register alias of
12389 the form alias .req reg, or a Neon .dn/.qn directive. */
12390 if (!create_register_alias (str, p)
12391 && !create_neon_reg_alias (str, p))
12392 as_bad (_("bad instruction `%s'"), str);
12393
12394 return;
12395 }
12396
12397 if (opcode->tag == OT_cinfix3_deprecated)
12398 as_warn (_("s suffix on comparison instruction is deprecated"));
12399
12400 if (thumb_mode)
12401 {
12402 arm_feature_set variant;
12403
12404 variant = cpu_variant;
12405 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12406 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12407 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12408 /* Check that this instruction is supported for this CPU. */
12409 if (!opcode->tvariant
12410 || (thumb_mode == 1
12411 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12412 {
12413 as_bad (_("selected processor does not support `%s'"), str);
12414 return;
12415 }
12416 if (inst.cond != COND_ALWAYS && !unified_syntax
12417 && opcode->tencode != do_t_branch)
12418 {
12419 as_bad (_("Thumb does not support conditional execution"));
12420 return;
12421 }
12422
12423 /* Check conditional suffixes. */
12424 if (current_it_mask)
12425 {
12426 int cond;
12427 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12428 current_it_mask <<= 1;
12429 current_it_mask &= 0x1f;
12430 /* The BKPT instruction is unconditional even in an IT block. */
12431 if (!inst.error
12432 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12433 {
12434 as_bad (_("incorrect condition in IT block"));
12435 return;
12436 }
12437 }
12438 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12439 {
12440 	  as_bad (_("thumb conditional instruction not in IT block"));
12441 return;
12442 }
12443
12444 mapping_state (MAP_THUMB);
12445 inst.instruction = opcode->tvalue;
12446
12447 if (!parse_operands (p, opcode->operands))
12448 opcode->tencode ();
12449
12450 /* Clear current_it_mask at the end of an IT block. */
12451 if (current_it_mask == 0x10)
12452 current_it_mask = 0;
12453
12454 if (!(inst.error || inst.relax))
12455 {
12456 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12457 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12458 if (inst.size_req && inst.size_req != inst.size)
12459 {
12460 as_bad (_("cannot honor width suffix -- `%s'"), str);
12461 return;
12462 }
12463 }
12464 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12465 *opcode->tvariant);
12466 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12467 	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
12468 	 anything other than bl/blx.
12469 This is overly pessimistic for relaxable instructions. */
12470 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12471 || inst.relax)
12472 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12473 arm_ext_v6t2);
12474 }
12475 else
12476 {
12477 /* Check that this instruction is supported for this CPU. */
12478 if (!opcode->avariant ||
12479 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12480 {
12481 as_bad (_("selected processor does not support `%s'"), str);
12482 return;
12483 }
12484 if (inst.size_req)
12485 {
12486 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12487 return;
12488 }
12489
12490 mapping_state (MAP_ARM);
12491 inst.instruction = opcode->avalue;
12492 if (opcode->tag == OT_unconditionalF)
12493 inst.instruction |= 0xF << 28;
12494 else
12495 inst.instruction |= inst.cond << 28;
12496 inst.size = INSN_SIZE;
12497 if (!parse_operands (p, opcode->operands))
12498 opcode->aencode ();
12499 /* Arm mode bx is marked as both v4T and v5 because it's still required
12500 on a hypothetical non-thumb v5 core. */
12501 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12502 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12503 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12504 else
12505 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12506 *opcode->avariant);
12507 }
12508 output_inst (str);
12509 }
12510
12511 /* Various frobbings of labels and their addresses. */
12512
12513 void
12514 arm_start_line_hook (void)
12515 {
12516 last_label_seen = NULL;
12517 }
12518
12519 void
12520 arm_frob_label (symbolS * sym)
12521 {
12522 last_label_seen = sym;
12523
12524 ARM_SET_THUMB (sym, thumb_mode);
12525
12526 #if defined OBJ_COFF || defined OBJ_ELF
12527 ARM_SET_INTERWORK (sym, support_interwork);
12528 #endif
12529
12530 /* Note - do not allow local symbols (.Lxxx) to be labeled
12531 as Thumb functions. This is because these labels, whilst
12532 they exist inside Thumb code, are not the entry points for
12533 possible ARM->Thumb calls. Also, these labels can be used
12534      as part of a computed goto or switch statement, e.g. gcc
12535 can generate code that looks like this:
12536
12537 ldr r2, [pc, .Laaa]
12538 lsl r3, r3, #2
12539 ldr r2, [r3, r2]
12540 mov pc, r2
12541
12542 .Lbbb: .word .Lxxx
12543 .Lccc: .word .Lyyy
12544 ..etc...
12545 	  .Laaa: .word .Lbbb
12546
12547 The first instruction loads the address of the jump table.
12548 The second instruction converts a table index into a byte offset.
12549 The third instruction gets the jump address out of the table.
12550 The fourth instruction performs the jump.
12551
12552 If the address stored at .Laaa is that of a symbol which has the
12553 Thumb_Func bit set, then the linker will arrange for this address
12554 to have the bottom bit set, which in turn would mean that the
12555 address computation performed by the third instruction would end
12556 up with the bottom bit set. Since the ARM is capable of unaligned
12557 word loads, the instruction would then load the incorrect address
12558 out of the jump table, and chaos would ensue. */
12559 if (label_is_thumb_function_name
12560 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12561 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12562 {
12563 /* When the address of a Thumb function is taken the bottom
12564 bit of that address should be set. This will allow
12565 interworking between Arm and Thumb functions to work
12566 correctly. */
12567
12568 THUMB_SET_FUNC (sym, 1);
12569
12570 label_is_thumb_function_name = FALSE;
12571 }
12572
12573 #ifdef OBJ_ELF
12574 dwarf2_emit_label (sym);
12575 #endif
12576 }
12577
12578 int
12579 arm_data_in_code (void)
12580 {
12581 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12582 {
12583 *input_line_pointer = '/';
12584 input_line_pointer += 5;
12585 *input_line_pointer = 0;
12586 return 1;
12587 }
12588
12589 return 0;
12590 }
12591
12592 char *
12593 arm_canonicalize_symbol_name (char * name)
12594 {
12595 int len;
12596
12597 if (thumb_mode && (len = strlen (name)) > 5
12598 && streq (name + len - 5, "/data"))
12599 *(name + len - 5) = 0;
12600
12601 return name;
12602 }
12603 \f
12604 /* Table of all register names defined by default. The user can
12605 define additional names with .req. Note that all register names
12606 should appear in both upper and lowercase variants. Some registers
12607 also have mixed-case names. */
12608
12609 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12610 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12611 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12612 #define REGSET(p,t) \
12613 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12614 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12615 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12616 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12617 #define REGSETH(p,t) \
12618 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12619 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12620 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12621 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12622 #define REGSET2(p,t) \
12623 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12624 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12625 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12626 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12627
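/* As an illustration of the macros above, REGSET(r, RN) expands to
   REGDEF(r0,0,RN) through REGDEF(r15,15,RN), i.e. sixteen entries of the
   form { "r0", 0, REG_TYPE_RN, TRUE, 0 }.  REGNUM2 doubles the register
   number, so REGSET2(q, NQ) maps q0..q15 onto the numbers 0, 2, ..., 30
   (each Q register overlaying a pair of D registers).  */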
12628 static const struct reg_entry reg_names[] =
12629 {
12630 /* ARM integer registers. */
12631 REGSET(r, RN), REGSET(R, RN),
12632
12633 /* ATPCS synonyms. */
12634 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12635 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12636 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12637
12638 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12639 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12640 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12641
12642 /* Well-known aliases. */
12643 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12644 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12645
12646 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12647 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12648
12649 /* Coprocessor numbers. */
12650 REGSET(p, CP), REGSET(P, CP),
12651
12652 /* Coprocessor register numbers. The "cr" variants are for backward
12653 compatibility. */
12654 REGSET(c, CN), REGSET(C, CN),
12655 REGSET(cr, CN), REGSET(CR, CN),
12656
12657 /* FPA registers. */
12658 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12659 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12660
12661 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12662 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12663
12664 /* VFP SP registers. */
12665 REGSET(s,VFS), REGSET(S,VFS),
12666 REGSETH(s,VFS), REGSETH(S,VFS),
12667
12668 /* VFP DP Registers. */
12669 REGSET(d,VFD), REGSET(D,VFD),
12670 /* Extra Neon DP registers. */
12671 REGSETH(d,VFD), REGSETH(D,VFD),
12672
12673 /* Neon QP registers. */
12674 REGSET2(q,NQ), REGSET2(Q,NQ),
12675
12676 /* VFP control registers. */
12677 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12678 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12679
12680 /* Maverick DSP coprocessor registers. */
12681 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12682 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12683
12684 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12685 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12686 REGDEF(dspsc,0,DSPSC),
12687
12688 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12689 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12690 REGDEF(DSPSC,0,DSPSC),
12691
12692 /* iWMMXt data registers - p0, c0-15. */
12693 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12694
12695 /* iWMMXt control registers - p1, c0-3. */
12696 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12697 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12698 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12699 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12700
12701 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12702 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12703 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12704 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12705 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12706
12707 /* XScale accumulator registers. */
12708 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12709 };
12710 #undef REGDEF
12711 #undef REGNUM
12712 #undef REGSET
12713
12714 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12715 within psr_required_here. */
12716 static const struct asm_psr psrs[] =
12717 {
12718 /* Backward compatibility notation. Note that "all" is no longer
12719 truly all possible PSR bits. */
12720 {"all", PSR_c | PSR_f},
12721 {"flg", PSR_f},
12722 {"ctl", PSR_c},
12723
12724 /* Individual flags. */
12725 {"f", PSR_f},
12726 {"c", PSR_c},
12727 {"x", PSR_x},
12728 {"s", PSR_s},
12729 /* Combinations of flags. */
12730 {"fs", PSR_f | PSR_s},
12731 {"fx", PSR_f | PSR_x},
12732 {"fc", PSR_f | PSR_c},
12733 {"sf", PSR_s | PSR_f},
12734 {"sx", PSR_s | PSR_x},
12735 {"sc", PSR_s | PSR_c},
12736 {"xf", PSR_x | PSR_f},
12737 {"xs", PSR_x | PSR_s},
12738 {"xc", PSR_x | PSR_c},
12739 {"cf", PSR_c | PSR_f},
12740 {"cs", PSR_c | PSR_s},
12741 {"cx", PSR_c | PSR_x},
12742 {"fsx", PSR_f | PSR_s | PSR_x},
12743 {"fsc", PSR_f | PSR_s | PSR_c},
12744 {"fxs", PSR_f | PSR_x | PSR_s},
12745 {"fxc", PSR_f | PSR_x | PSR_c},
12746 {"fcs", PSR_f | PSR_c | PSR_s},
12747 {"fcx", PSR_f | PSR_c | PSR_x},
12748 {"sfx", PSR_s | PSR_f | PSR_x},
12749 {"sfc", PSR_s | PSR_f | PSR_c},
12750 {"sxf", PSR_s | PSR_x | PSR_f},
12751 {"sxc", PSR_s | PSR_x | PSR_c},
12752 {"scf", PSR_s | PSR_c | PSR_f},
12753 {"scx", PSR_s | PSR_c | PSR_x},
12754 {"xfs", PSR_x | PSR_f | PSR_s},
12755 {"xfc", PSR_x | PSR_f | PSR_c},
12756 {"xsf", PSR_x | PSR_s | PSR_f},
12757 {"xsc", PSR_x | PSR_s | PSR_c},
12758 {"xcf", PSR_x | PSR_c | PSR_f},
12759 {"xcs", PSR_x | PSR_c | PSR_s},
12760 {"cfs", PSR_c | PSR_f | PSR_s},
12761 {"cfx", PSR_c | PSR_f | PSR_x},
12762 {"csf", PSR_c | PSR_s | PSR_f},
12763 {"csx", PSR_c | PSR_s | PSR_x},
12764 {"cxf", PSR_c | PSR_x | PSR_f},
12765 {"cxs", PSR_c | PSR_x | PSR_s},
12766 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12767 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12768 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12769 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12770 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12771 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12772 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12773 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12774 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12775 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12776 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12777 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12778 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12779 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12780 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12781 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12782 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12783 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12784 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12785 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12786 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12787 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12788 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12789 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12790 };
12791
12792 /* Table of V7M psr names. */
12793 static const struct asm_psr v7m_psrs[] =
12794 {
12795 {"apsr", 0 },
12796 {"iapsr", 1 },
12797 {"eapsr", 2 },
12798 {"psr", 3 },
12799 {"ipsr", 5 },
12800 {"epsr", 6 },
12801 {"iepsr", 7 },
12802 {"msp", 8 },
12803 {"psp", 9 },
12804 {"primask", 16},
12805 {"basepri", 17},
12806 {"basepri_max", 18},
12807 {"faultmask", 19},
12808 {"control", 20}
12809 };
12810
12811 /* Table of all shift-in-operand names. */
12812 static const struct asm_shift_name shift_names [] =
12813 {
12814 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12815 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12816 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12817 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12818 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12819 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12820 };
12821
12822 /* Table of all explicit relocation names. */
12823 #ifdef OBJ_ELF
12824 static struct reloc_entry reloc_names[] =
12825 {
12826 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12827 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12828 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12829 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12830 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12831 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12832 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12833 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12834 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12835 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12836 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12837 };
12838 #endif
12839
12840 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12841 static const struct asm_cond conds[] =
12842 {
12843 {"eq", 0x0},
12844 {"ne", 0x1},
12845 {"cs", 0x2}, {"hs", 0x2},
12846 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12847 {"mi", 0x4},
12848 {"pl", 0x5},
12849 {"vs", 0x6},
12850 {"vc", 0x7},
12851 {"hi", 0x8},
12852 {"ls", 0x9},
12853 {"ge", 0xa},
12854 {"lt", 0xb},
12855 {"gt", 0xc},
12856 {"le", 0xd},
12857 {"al", 0xe}
12858 };
12859
12860 static struct asm_barrier_opt barrier_opt_names[] =
12861 {
12862 { "sy", 0xf },
12863 { "un", 0x7 },
12864 { "st", 0xe },
12865 { "unst", 0x6 }
12866 };
12867
12868 /* Table of ARM-format instructions. */
12869
12870 /* Macros for gluing together operand strings. N.B. In all cases
12871 other than OPS0, the trailing OP_stop comes from default
12872 zero-initialization of the unspecified elements of the array. */
12873 #define OPS0() { OP_stop, }
12874 #define OPS1(a) { OP_##a, }
12875 #define OPS2(a,b) { OP_##a,OP_##b, }
12876 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12877 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12878 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12879 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
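/* For example, OPS3 (RR, oRR, SH) yields { OP_RR, OP_oRR, OP_SH, }, and the
   unspecified trailing elements default to zero, i.e. OP_stop.  */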
12880
12881 /* These macros abstract out the exact format of the mnemonic table and
12882 save some repeated characters. */
12883
12884 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12885 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12886 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12887 THUMB_VARIANT, do_##ae, do_##te }
12888
12889 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12890 a T_MNEM_xyz enumerator. */
12891 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12892 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12893 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12894 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
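/* For instance, the entry tCE(and, 0000000, and, 3, (RR, oRR, SH), arit,
   t_arit3c) in the table below expands (via TxCE and OPS3) to roughly
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c },
   where ARM_VARIANT and THUMB_VARIANT are whatever the #defines name at that
   point in the table (&arm_ext_v1 and &arm_ext_v4t for this entry).  */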
12895
12896 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12897 infix after the third character. */
12898 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12899 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12900 THUMB_VARIANT, do_##ae, do_##te }
12901 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12902 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12903 THUMB_VARIANT, do_##ae, do_##te }
12904 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12905 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12906 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12907 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12908 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12909 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12910 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12911 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12912
12913 /* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
12914    appear in the conditions table.  */
12915 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12916 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12917 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12918
12919 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12920 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12921 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12922 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12923 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12924 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12925 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12926 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12927 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12928 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12929 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12930 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
12931 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
12932 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
12933 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
12934 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
12935 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
12936 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
12937 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
12938 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
12939
12940 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
12941 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
12942 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
12943 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
12944
12945 /* Mnemonic that cannot be conditionalized. The ARM condition-code
12946 field is still 0xE. Many of the Thumb variants can be executed
12947 conditionally, so this is checked separately. */
12948 #define TUE(mnem, op, top, nops, ops, ae, te) \
12949 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
12950 THUMB_VARIANT, do_##ae, do_##te }
12951
12952 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
12953 condition code field. */
12954 #define TUF(mnem, op, top, nops, ops, ae, te) \
12955 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
12956 THUMB_VARIANT, do_##ae, do_##te }
12957
12958 /* ARM-only variants of all the above. */
12959 #define CE(mnem, op, nops, ops, ae) \
12960 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12961
12962 #define C3(mnem, op, nops, ops, ae) \
12963 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12964
12965 /* Legacy mnemonics that always have conditional infix after the third
12966 character. */
12967 #define CL(mnem, op, nops, ops, ae) \
12968 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12969 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12970
12971 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
12972 #define cCE(mnem, op, nops, ops, ae) \
12973 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12974
12975 /* Legacy coprocessor instructions where conditional infix and conditional
12976 suffix are ambiguous. For consistency this includes all FPA instructions,
12977 not just the potentially ambiguous ones. */
12978 #define cCL(mnem, op, nops, ops, ae) \
12979 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12980 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12981
12982 /* Coprocessor, takes either a suffix or a position-3 infix
12983 (for an FPA corner case). */
12984 #define C3E(mnem, op, nops, ops, ae) \
12985 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
12986 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12987
12988 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
12989 { #m1 #m2 #m3, OPS##nops ops, \
12990 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12991 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12992
12993 #define CM(m1, m2, op, nops, ops, ae) \
12994 xCM_(m1, , m2, op, nops, ops, ae), \
12995 xCM_(m1, eq, m2, op, nops, ops, ae), \
12996 xCM_(m1, ne, m2, op, nops, ops, ae), \
12997 xCM_(m1, cs, m2, op, nops, ops, ae), \
12998 xCM_(m1, hs, m2, op, nops, ops, ae), \
12999 xCM_(m1, cc, m2, op, nops, ops, ae), \
13000 xCM_(m1, ul, m2, op, nops, ops, ae), \
13001 xCM_(m1, lo, m2, op, nops, ops, ae), \
13002 xCM_(m1, mi, m2, op, nops, ops, ae), \
13003 xCM_(m1, pl, m2, op, nops, ops, ae), \
13004 xCM_(m1, vs, m2, op, nops, ops, ae), \
13005 xCM_(m1, vc, m2, op, nops, ops, ae), \
13006 xCM_(m1, hi, m2, op, nops, ops, ae), \
13007 xCM_(m1, ls, m2, op, nops, ops, ae), \
13008 xCM_(m1, ge, m2, op, nops, ops, ae), \
13009 xCM_(m1, lt, m2, op, nops, ops, ae), \
13010 xCM_(m1, gt, m2, op, nops, ops, ae), \
13011 xCM_(m1, le, m2, op, nops, ops, ae), \
13012 xCM_(m1, al, m2, op, nops, ops, ae)
13013
13014 #define UE(mnem, op, nops, ops, ae) \
13015 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13016
13017 #define UF(mnem, op, nops, ops, ae) \
13018 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13019
13020 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
13021 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
13022 use the same encoding function for each. */
13023 #define NUF(mnem, op, nops, ops, enc) \
13024 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
13025 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13026
13027 /* Neon data processing, version which indirects through neon_enc_tab for
13028 the various overloaded versions of opcodes. */
13029 #define nUF(mnem, op, nops, ops, enc) \
13030 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13031 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13032
13033 /* Neon insn with conditional suffix for the ARM version, non-overloaded
13034 version. */
13035 #define NCE(mnem, op, nops, ops, enc) \
13036 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13037 THUMB_VARIANT, do_##enc, do_##enc }
13038
13039 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13040 #define nCE(mnem, op, nops, ops, enc) \
13041 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13042 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13043
13044 #define do_0 0
13045
13046 /* Thumb-only, unconditional. */
13047 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13048
13049 static const struct asm_opcode insns[] =
13050 {
13051 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13052 #define THUMB_VARIANT &arm_ext_v4t
13053 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13054 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13055 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13056 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13057 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13058 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13059 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13060 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13061 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13062 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13063 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13064 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13065 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13066 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13067 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13068 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13069
13070 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13071 for setting PSR flag bits. They are obsolete in V6 and do not
13072 have Thumb equivalents. */
13073 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13074 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13075 CL(tstp, 110f000, 2, (RR, SH), cmp),
13076 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13077 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13078 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13079 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13080 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13081 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13082
13083 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13084 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13085 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13086 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13087
13088 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13089 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13090 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13091 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13092
13093 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13094 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13095 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13096 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13097 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13098 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13099
13100 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13101 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13102 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13103 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13104
13105 /* Pseudo ops. */
13106 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13107 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13108 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13109
13110 /* Thumb-compatibility pseudo ops. */
13111 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13112 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13113 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13114 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13115 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13116 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13117 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13118 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13119 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13120 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13121 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13122 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13123
13124 #undef THUMB_VARIANT
13125 #define THUMB_VARIANT &arm_ext_v6
13126 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13127
13128 /* V1 instructions with no Thumb analogue prior to V6T2. */
13129 #undef THUMB_VARIANT
13130 #define THUMB_VARIANT &arm_ext_v6t2
13131 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13132 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13133 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13134 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13135 CL(teqp, 130f000, 2, (RR, SH), cmp),
13136
13137 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13138 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13139 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13140 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13141
13142 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13143 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13144
13145 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13146 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13147
13148 /* V1 instructions with no Thumb analogue at all. */
13149 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13150 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13151
13152 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13153 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13154 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13155 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13156 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13157 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13158 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13159 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13160
13161 #undef ARM_VARIANT
13162 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13163 #undef THUMB_VARIANT
13164 #define THUMB_VARIANT &arm_ext_v4t
13165 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13166 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13167
13168 #undef THUMB_VARIANT
13169 #define THUMB_VARIANT &arm_ext_v6t2
13170 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13171 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13172
13173 /* Generic coprocessor instructions. */
13174 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13175 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13176 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13177 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13178 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13179 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13180 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13181
13182 #undef ARM_VARIANT
13183 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13184 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13185 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13186
13187 #undef ARM_VARIANT
13188 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13189 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13190 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13191
13192 #undef ARM_VARIANT
13193 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13194 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13195 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13196 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13197 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13198 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13199 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13200 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13201 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13202
13203 #undef ARM_VARIANT
13204 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13205 #undef THUMB_VARIANT
13206 #define THUMB_VARIANT &arm_ext_v4t
13207 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13208 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13209 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13210 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13211 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13212 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13213
13214 #undef ARM_VARIANT
13215 #define ARM_VARIANT &arm_ext_v4t_5
13216 /* ARM Architecture 4T. */
13217 /* Note: bx (and blx) are required on V5, even if the processor does
13218 not support Thumb. */
13219 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13220
13221 #undef ARM_VARIANT
13222 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13223 #undef THUMB_VARIANT
13224 #define THUMB_VARIANT &arm_ext_v5t
13225 /* Note: blx has 2 variants; the .value coded here is for
13226 BLX(2). Only this variant has conditional execution. */
13227 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13228 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13229
13230 #undef THUMB_VARIANT
13231 #define THUMB_VARIANT &arm_ext_v6t2
13232 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13233 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13234 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13235 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13236 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13237 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13238 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13239 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13240
13241 #undef ARM_VARIANT
13242 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13243 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13244 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13245 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13246 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13247
13248 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13249 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13250
13251 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13252 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13253 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13254 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13255
13256 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13257 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13258 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13259 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13260
13261 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13262 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13263
13264 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13265 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13266 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13267 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13268
13269 #undef ARM_VARIANT
13270 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13271 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13272 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13273 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13274
13275 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13276 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13277
13278 #undef ARM_VARIANT
13279 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13280 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13281
13282 #undef ARM_VARIANT
13283 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13284 #undef THUMB_VARIANT
13285 #define THUMB_VARIANT &arm_ext_v6
13286 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13287 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13288 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13289 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13290 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13291 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13292 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13293 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13294 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13295 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13296
13297 #undef THUMB_VARIANT
13298 #define THUMB_VARIANT &arm_ext_v6t2
13299 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13300 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13301 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13302
13303 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13304 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13305
13306 /* ARM V6 not included in V7M (e.g. integer SIMD). */
13307 #undef THUMB_VARIANT
13308 #define THUMB_VARIANT &arm_ext_v6_notm
13309 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13310 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13311 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13312 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13313 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13314 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13315 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13316 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13317 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13318 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13319 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13320 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13321 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13322 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13323 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13324 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13325 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13326 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13327 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13328 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13329 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13330 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13331 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13332 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13333 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13334 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13335 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13336 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13337 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13338 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13339 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13340 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13341 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13342 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13343 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13344 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13345 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13346 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13347 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13348 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13349 UF(rfeib, 9900a00, 1, (RRw), rfe),
13350 UF(rfeda, 8100a00, 1, (RRw), rfe),
13351 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13352 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13353 UF(rfefa, 9900a00, 1, (RRw), rfe),
13354 UF(rfeea, 8100a00, 1, (RRw), rfe),
13355 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13356 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13357 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13358 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13359 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13360 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13361 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13362 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13363 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13364 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13365 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13366 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13367 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13368 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13369 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13370 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13371 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13372 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13373 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13374 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13375 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13376 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13377 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13378 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13379 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13380 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13381 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13382 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13383 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13384 UF(srsib, 9cd0500, 1, (I31w), srs),
13385 UF(srsda, 84d0500, 1, (I31w), srs),
13386 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13387 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13388 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13389 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13390 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13391 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13392 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13393
13394 #undef ARM_VARIANT
13395 #define ARM_VARIANT &arm_ext_v6k
13396 #undef THUMB_VARIANT
13397 #define THUMB_VARIANT &arm_ext_v6k
13398 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13399 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13400 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13401 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13402
13403 #undef THUMB_VARIANT
13404 #define THUMB_VARIANT &arm_ext_v6_notm
13405 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13406 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13407
13408 #undef THUMB_VARIANT
13409 #define THUMB_VARIANT &arm_ext_v6t2
13410 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13411 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13412 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13413 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13414 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13415
13416 #undef ARM_VARIANT
13417 #define ARM_VARIANT &arm_ext_v6z
13418 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13419
13420 #undef ARM_VARIANT
13421 #define ARM_VARIANT &arm_ext_v6t2
13422 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13423 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13424 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13425 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13426
13427 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13428 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
13429 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
13430 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13431
13432 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13433 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13434 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13435 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13436
13437 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13438 UT(cbz, b100, 2, (RR, EXP), t_czb),
13439 /* ARM does not really have an IT instruction. */
13440 TUE(it, 0, bf08, 1, (COND), it, t_it),
13441 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13442 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13443 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13444 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13445 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13446 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13447 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13448 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13449 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13450 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13451 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13452 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13453 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13454 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
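/* In the Thumb opcodes above, the low nibble is the IT then/else mask and
   bits [7:4] hold the first condition; the condition is filled in (and the
   mask flipped where its low bit requires it) when the instruction is
   encoded. */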
13455
13456 /* Thumb-2 only instructions. */
13457 #undef ARM_VARIANT
13458 #define ARM_VARIANT NULL
13459
13460 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13461 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13462 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13463 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13464
13465 /* Thumb-2 hardware division instructions (R and M profiles only). */
13466 #undef THUMB_VARIANT
13467 #define THUMB_VARIANT &arm_ext_div
13468 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13469 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13470
13471 /* ARM V7 instructions. */
13472 #undef ARM_VARIANT
13473 #define ARM_VARIANT &arm_ext_v7
13474 #undef THUMB_VARIANT
13475 #define THUMB_VARIANT &arm_ext_v7
13476 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13477 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13478 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13479 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13480 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
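/* The barrier option operand is optional; architecturally, omitting it is
   the same as writing the full-system SY option. */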
13481
13482 #undef ARM_VARIANT
13483 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13484 cCE(wfs, e200110, 1, (RR), rd),
13485 cCE(rfs, e300110, 1, (RR), rd),
13486 cCE(wfc, e400110, 1, (RR), rd),
13487 cCE(rfc, e500110, 1, (RR), rd),
13488
13489 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13490 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13491 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13492 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13493
13494 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13495 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13496 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13497 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13498
13499 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13500 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13501 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13502 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13503 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13504 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13505 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13506 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13507 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13508 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13509 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13510 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13511
13512 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13513 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13514 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13515 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13516 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13517 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13518 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13519 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13520 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13521 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13522 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13523 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13524
13525 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13526 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13527 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13528 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13529 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13530 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13531 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13532 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13533 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13534 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13535 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13536 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13537
13538 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13539 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13540 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13541 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13542 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13543 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13544 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13545 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13546 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13547 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13548 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13549 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13550
13551 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13552 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13553 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13554 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13555 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13556 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13557 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13558 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13559 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13560 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13561 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13562 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13563
13564 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13565 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13566 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13567 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13568 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13569 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13570 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13571 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13572 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13573 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13574 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13575 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13576
13577 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13578 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13579 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13580 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13581 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13582 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13583 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13584 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13585 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13586 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13587 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13588 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13589
13590 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13591 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13592 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13593 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13594 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13595 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13596 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13597 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13598 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13599 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13600 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13601 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
13602
13603 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13604 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13605 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13606 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13607 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13608 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13609 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13610 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13611 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13612 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13613 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13614 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13615
13616 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13617 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13618 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13619 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13620 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13621 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13622 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13623 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13624 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13625 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13626 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13627 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13628
13629 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13630 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13631 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13632 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13633 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13634 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13635 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13636 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13637 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13638 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13639 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13640 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13641
13642 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13643 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13644 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13645 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13646 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13647 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13648 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13649 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13650 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13651 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13652 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13653 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13654
13655 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13656 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13657 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13658 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13659 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13660 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13661 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13662 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13663 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13664 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13665 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13666 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13667
13668 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13669 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13670 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13671 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13672 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13673 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13674 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13675 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13676 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13677 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13678 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13679 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13680
13681 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13682 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13683 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13684 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13685 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13686 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13687 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13688 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13689 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13690 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13691 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13692 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13693
13694 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13695 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13696 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13697 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13698 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13699 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13700 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13701 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13702 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13703 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13704 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13705 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13706
13707 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13708 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13709 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13710 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13711 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13712 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13713 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13714 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13715 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13716 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13717 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13718 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13719
13720 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13721 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13722 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13723 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13724 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13725 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13726 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13727 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13728 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13729 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13730 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13731 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13732
13733 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13734 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13735 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13736 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13737 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13738 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13739 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13740 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13741 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13742 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13743 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13744 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13745
13746 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13747 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13748 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13749 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13750 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13751 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13752 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13753 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13754 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13755 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13756 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13757 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13758
13759 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13760 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13761 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13762 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13763 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13764 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13765 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13766 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13767 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13768 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13769 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13770 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13771
13772 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13773 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13774 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13775 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13776 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13777 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13778 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13779 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13780 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13781 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13782 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13784
13785 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13788 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13789 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13794 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13797
13798 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13801 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13802 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13807 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13810
13811 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13814 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13815 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13820 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13823
13824 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13827 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13828 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13829 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13830 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13831 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13832 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13833 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13834 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13835 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13836
13837 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13838 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13839 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13840 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13841 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13842 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13843 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13844 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13845 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13846 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13847 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13848 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13849
13850 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13851 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13852 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13853 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13854 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13855 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13856 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13857 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13858 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13859 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13860 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13861 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13862
13863 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13864 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13865 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13866 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13867 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13868 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13869 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13870 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13871 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13872 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13873 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13874 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13875
13876 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13877 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13878 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13879 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13880
13881 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13882 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13883 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13884 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13885 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13886 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13887 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13888 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13889 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13890 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13891 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13892 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13893
13894 /* The implementation of the FIX instruction is broken on some
13895 assemblers, in that it accepts a precision specifier as well as a
13896 rounding specifier, despite the fact that this is meaningless.
13897 To be more compatible, we accept it as well, though of course it
13898 does not set any bits. */
13899 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13900 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13901 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13902 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13903 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13904 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13905 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13906 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13907 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13908 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13909 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13910 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13911 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13912
13913 /* Instructions that were new with the real FPA, call them V2. */
13914 #undef ARM_VARIANT
13915 #define ARM_VARIANT &fpu_fpa_ext_v2
13916 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13917 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13918 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13919 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13920 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13921 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13922
13923 #undef ARM_VARIANT
13924 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13925 /* Moves and type conversions. */
13926 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13927 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13928 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13929 cCE(fmstat, ef1fa10, 0, (), noargs),
13930 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
13931 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
13932 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
13933 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13934 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
13935 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
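/* The "z" forms (ftosizs, ftouizs) always round towards zero; the forms
   without "z" round according to the FPSCR rounding mode. */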
13936 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
13937 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
13938
13939 /* Memory operations. */
13940 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
13941 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
13942 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13943 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13944 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13945 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13946 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13947 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13948 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13949 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13950 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13951 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13952 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13953 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13954 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13955 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13956 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13957 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13958
13959 /* Monadic operations. */
13960 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
13961 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
13962 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
13963
13964 /* Dyadic operations. */
13965 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13966 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13967 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13968 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13969 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13970 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13971 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13972 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13973 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13974
13975 /* Comparisons. */
13976 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
13977 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
13978 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
13979 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
13980
13981 #undef ARM_VARIANT
13982 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13983 /* Moves and type conversions. */
13984 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13985 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13986 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13987 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
13988 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
13989 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
13990 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
13991 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13992 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
13993 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13994 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13995 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13996 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13997
13998 /* Memory operations. */
13999 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
14000 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
14001 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14002 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14003 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14004 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14005 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14006 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14007 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14008 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14009
14010 /* Monadic operations. */
14011 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14012 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14013 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14014
14015 /* Dyadic operations. */
14016 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14017 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14018 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14019 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14020 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14021 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14022 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14023 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14024 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14025
14026 /* Comparisons. */
14027 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14028 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
14029 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14030 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
14031
14032 #undef ARM_VARIANT
14033 #define ARM_VARIANT &fpu_vfp_ext_v2
14034 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
14035 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
14036 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
14037 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
14038
14039 #undef THUMB_VARIANT
14040 #define THUMB_VARIANT &fpu_neon_ext_v1
14041 #undef ARM_VARIANT
14042 #define ARM_VARIANT &fpu_neon_ext_v1
14043 /* Data processing with three registers of the same length. */
14044 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
14045 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
14046 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14047 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14048 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14049 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14050 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14051 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14052 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14053 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14054 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14055 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14056 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14057 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14058 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14059 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14060 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14061 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14062 /* If not immediate, fall back to neon_dyadic_i64_su.
14063 shl_imm should accept I8 I16 I32 I64,
14064 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14065 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14066 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14067 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14068 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14069 /* Logic ops, types optional & ignored. */
14070 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14071 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14072 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14073 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14074 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14075 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14076 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14077 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14078 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14079 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14080 /* Bitfield ops, untyped. */
14081 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14082 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14083 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14084 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14085 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14086 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14087 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14088 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14089 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14090 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14091 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14092 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14093 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14094 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14095 back to neon_dyadic_if_su. */
14096 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14097 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14098 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14099 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14100 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14101 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14102 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14103 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14104 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14105 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14106 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14107 /* As above, D registers only. */
14108 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14109 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14110 /* Int and float variants, signedness unimportant. */
14111 /* If not scalar, fall back to neon_dyadic_if_i. */
14112 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14113 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14114 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14115 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14116 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14117 /* Add/sub take types I8 I16 I32 I64 F32. */
14118 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14119 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14120 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14121 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14122 /* vtst takes sizes 8, 16, 32. */
14123 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14124 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14125 /* VMUL takes I8 I16 I32 F32 P8. */
14126 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14127 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14128 /* VQD{R}MULH takes S16 S32. */
14129 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14130 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14131 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14132 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14133 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14134 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14135 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14136 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14137 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14138 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14139 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14140 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14141 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14142 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14143 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14144 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14145
14146 /* Two address, int/float. Types S8 S16 S32 F32. */
14147 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14148 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14149 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14150 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14151
14152 /* Data processing with two registers and a shift amount. */
14153 /* Right shifts, and variants with rounding.
14154 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14155 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14156 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14157 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14158 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14159 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14160 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14161 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14162 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14163 /* Shift and insert. Sizes accepted 8 16 32 64. */
14164 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14165 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14166 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14167 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14168 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14169 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14170 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14171 /* Right shift immediate, saturating & narrowing, with rounding variants.
14172 Types accepted S16 S32 S64 U16 U32 U64. */
14173 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14174 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14175 /* As above, unsigned. Types accepted S16 S32 S64. */
14176 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14177 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14178 /* Right shift narrowing. Types accepted I16 I32 I64. */
14179 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14180 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14181 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14182 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14183 /* CVT with optional immediate for fixed-point variant. */
14184 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14185 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14186
14187 /* One register and an immediate value. All encoding special-cased! */
14188 #undef THUMB_VARIANT
14189 #define THUMB_VARIANT &fpu_vfp_ext_v1
14190 #undef ARM_VARIANT
14191 #define ARM_VARIANT &fpu_vfp_ext_v1
14192 NCE(vmov, 0, 1, (VMOV), neon_mov),
14193
14194 #undef THUMB_VARIANT
14195 #define THUMB_VARIANT &fpu_neon_ext_v1
14196 #undef ARM_VARIANT
14197 #define ARM_VARIANT &fpu_neon_ext_v1
14198 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14199 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14200 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14201
14202 /* Data processing, three registers of different lengths. */
14203 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14204 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14205 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14206 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14207 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14208 /* If not scalar, fall back to neon_dyadic_long.
14209 Vector types as above, scalar types S16 S32 U16 U32. */
14210 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14211 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14212 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14213 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14214 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14215 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14216 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14217 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14218 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14219 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14220 /* Saturating doubling multiplies. Types S16 S32. */
14221 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14222 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14223 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14224 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14225 S16 S32 U16 U32. */
14226 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14227
14228 /* Extract. Size 8. */
14229 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14230 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14231
14232 /* Two registers, miscellaneous. */
14233 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14234 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14235 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14236 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14237 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14238 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14239 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14240 /* Vector replicate. Sizes 8 16 32. */
14241 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14242 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14243 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14244 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14245 /* VMOVN. Types I16 I32 I64. */
14246 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14247 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14248 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14249 /* VQMOVUN. Types S16 S32 S64. */
14250 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14251 /* VZIP / VUZP. Sizes 8 16 32. */
14252 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14253 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14254 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14255 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14256 /* VQABS / VQNEG. Types S8 S16 S32. */
14257 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14258 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14259 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14260 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14261 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14262 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14263 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14264 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14265 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14266 /* Reciprocal estimates. Types U32 F32. */
14267 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14268 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14269 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14270 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14271 /* VCLS. Types S8 S16 S32. */
14272 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14273 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14274 /* VCLZ. Types I8 I16 I32. */
14275 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14276 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14277 /* VCNT. Size 8. */
14278 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14279 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14280 /* Two address, untyped. */
14281 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14282 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14283 /* VTRN. Sizes 8 16 32. */
14284 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14285 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14286
14287 /* Table lookup. Size 8. */
14288 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14289 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14290
14291 #undef THUMB_VARIANT
14292 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
14293 #undef ARM_VARIANT
14294 #define ARM_VARIANT &fpu_vfp_ext_v1xd
14295
14296 /* Load/store instructions. Available in Neon or VFPv3. */
14297 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14298 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14299 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14300 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14301 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14302 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14303 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14304 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14305
14306 #undef THUMB_VARIANT
14307 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14308 #undef ARM_VARIANT
14309 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14310
14311 /* Neon element/structure load/store. */
14312 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14313 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14314 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14315 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14316 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14317 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14318 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14319 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14320
14321 #undef THUMB_VARIANT
14322 #define THUMB_VARIANT &fpu_vfp_ext_v3
14323 #undef ARM_VARIANT
14324 #define ARM_VARIANT &fpu_vfp_ext_v3
14325
14326 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14327 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14328 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14329 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14330 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14331 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14332 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14333 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14334 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14335 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14336 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14337 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14338 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14339 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14340 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14341 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14342 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14343 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
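/* In the fshtos..ftould conversions above, the immediate is the number of
   fraction bits in the fixed-point operand (at most 16 for the halfword
   forms, at most 32 for the word forms). */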
14344
14345 #undef THUMB_VARIANT
14346 #undef ARM_VARIANT
14347 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14348 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14349 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14350 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14351 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14352 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14353 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14354 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14355 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14356
14357 #undef ARM_VARIANT
14358 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14359 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14360 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14361 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14362 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14363 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14364 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14365 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14366 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14367 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14368 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14369 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14370 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14371 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14372 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14373 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14374 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14375 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14376 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14377 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14378 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14379 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14380 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14381 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14382 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14383 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14384 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14385 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14386 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14387 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14388 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14389 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14390 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14391 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14392 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14393 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14394 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14395 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14396 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14397 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14398 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14399 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14400 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14401 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14402 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14403 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14404 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14405 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14406 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14407 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14408 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14409 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14410 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14411 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14412 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14413 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14414 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14415 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14416 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14417 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14418 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14419 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14420 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14421 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14422 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14423 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14424 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14425 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14426 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14427 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14428 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14429 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14430 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14431 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14432 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14433 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14434 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14435 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14436 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14437 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14438 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14439 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14440 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14441 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14442 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14443 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14444 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14445 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14446 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14447 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14448 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14449 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14450 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14451 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14452 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14453 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14454 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14455 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14456 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14457 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14458 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14459 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14460 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14461 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14462 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14463 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14464 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14465 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14466 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14467 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14468 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14469 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14470 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14471 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14472 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14473 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14474 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14475 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14476 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14477 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14478 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14479 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14480 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14481 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14482 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14483 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14484 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14485 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14486 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14487 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14488 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14489 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14490 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14491 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14492 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14493 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14494 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14495 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14496 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14497 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14498 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14499 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14500 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14501 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14502 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14503 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14504 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14505 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14506 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14507 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14508 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14509 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14510 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14511 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14512 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14513 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14514 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14515 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14516 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14517 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14518 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14519 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14520 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14521
14522 #undef ARM_VARIANT
14523 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14524 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14525 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14526 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14527 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14528 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14529 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14530 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14531 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14532 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14533 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14534 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14535 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14536 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14537 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14538 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14539 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14540 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14541 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14542 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14543 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14544 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14545 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14546 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14547 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14548 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14549 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14550 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14551 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14552 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14553 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14554 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14555 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14556 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14557 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14558 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14559 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14560 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14561 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14562 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14563 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14564 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14565 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14566 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14567 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14568 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14569 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14570 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14571 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14572 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14573 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14574 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14575 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14576 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14577 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14578 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14579 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14580 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14581 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14582 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14583 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14584 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14585 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14586 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14587 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14588 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14589 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14590 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14591 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14592 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14593 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14594 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14595 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14596 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14597 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14598 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14599 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14600 };
14601 #undef ARM_VARIANT
14602 #undef THUMB_VARIANT
14603 #undef TCE
14604 #undef TCM
14605 #undef TUE
14606 #undef TUF
14607 #undef TCC
14608 #undef cCE
14609 #undef cCL
14610 #undef C3E
14611 #undef CE
14612 #undef CM
14613 #undef UE
14614 #undef UF
14615 #undef UT
14616 #undef NUF
14617 #undef nUF
14618 #undef NCE
14619 #undef nCE
14620 #undef OPS0
14621 #undef OPS1
14622 #undef OPS2
14623 #undef OPS3
14624 #undef OPS4
14625 #undef OPS5
14626 #undef OPS6
14627 #undef do_0
14628 \f
14629 /* MD interface: bits in the object file. */
14630
14631 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14632    for use in the object file, and store them in the array pointed to by buf.
14633    This knows about the endianness of the target machine and does
14634    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
14635    2 (short) and 4 (long).  Floating-point numbers are put out as a series
14636    of LITTLENUMS (shorts, here at least).  */
14637
14638 void
14639 md_number_to_chars (char * buf, valueT val, int n)
14640 {
14641 if (target_big_endian)
14642 number_to_chars_bigendian (buf, val, n);
14643 else
14644 number_to_chars_littleendian (buf, val, n);
14645 }
14646
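/* Read an N-byte value from BUF, honouring the endianness of the target,
   and return it as a host integer.  The inverse of md_number_to_chars
   above.  */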
14647 static valueT
14648 md_chars_to_number (char * buf, int n)
14649 {
14650 valueT result = 0;
14651 unsigned char * where = (unsigned char *) buf;
14652
14653 if (target_big_endian)
14654 {
14655 while (n--)
14656 {
14657 result <<= 8;
14658 result |= (*where++ & 255);
14659 }
14660 }
14661 else
14662 {
14663 while (n--)
14664 {
14665 result <<= 8;
14666 result |= (where[n] & 255);
14667 }
14668 }
14669
14670 return result;
14671 }
14672
14673 /* MD interface: Sections. */
14674
14675 /* Estimate the size of a frag before relaxing. Assume everything fits in
14676 2 bytes. */
14677
14678 int
14679 md_estimate_size_before_relax (fragS * fragp,
14680 segT segtype ATTRIBUTE_UNUSED)
14681 {
14682 fragp->fr_var = 2;
14683 return 2;
14684 }
14685
14686 /* Convert a machine dependent frag. */
14687
14688 void
14689 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14690 {
14691 unsigned long insn;
14692 unsigned long old_op;
14693 char *buf;
14694 expressionS exp;
14695 fixS *fixp;
14696 int reloc_type;
14697 int pc_rel;
14698 int opcode;
14699
14700 buf = fragp->fr_literal + fragp->fr_fix;
14701
14702 old_op = bfd_get_16(abfd, buf);
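  /* OLD_OP is the 16-bit encoding that was emitted when the instruction
     was first assembled; if the frag has relaxed to 4 bytes, its register
     fields are carried over into the 32-bit replacement built below.  */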
14703 if (fragp->fr_symbol) {
14704 exp.X_op = O_symbol;
14705 exp.X_add_symbol = fragp->fr_symbol;
14706 } else {
14707 exp.X_op = O_constant;
14708 }
14709 exp.X_add_number = fragp->fr_offset;
14710 opcode = fragp->fr_subtype;
14711 switch (opcode)
14712 {
14713 case T_MNEM_ldr_pc:
14714 case T_MNEM_ldr_pc2:
14715 case T_MNEM_ldr_sp:
14716 case T_MNEM_str_sp:
14717 case T_MNEM_ldr:
14718 case T_MNEM_ldrb:
14719 case T_MNEM_ldrh:
14720 case T_MNEM_str:
14721 case T_MNEM_strb:
14722 case T_MNEM_strh:
14723 if (fragp->fr_var == 4)
14724 {
14725 insn = THUMB_OP32(opcode);
14726 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14727 {
14728 insn |= (old_op & 0x700) << 4;
14729 }
14730 else
14731 {
14732 insn |= (old_op & 7) << 12;
14733 insn |= (old_op & 0x38) << 13;
14734 }
14735 insn |= 0x00000c00;
14736 put_thumb32_insn (buf, insn);
14737 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14738 }
14739 else
14740 {
14741 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14742 }
14743 pc_rel = (opcode == T_MNEM_ldr_pc2);
14744 break;
14745 case T_MNEM_adr:
14746 if (fragp->fr_var == 4)
14747 {
14748 insn = THUMB_OP32 (opcode);
14749 insn |= (old_op & 0xf0) << 4;
14750 put_thumb32_insn (buf, insn);
14751 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14752 }
14753 else
14754 {
14755 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14756 exp.X_add_number -= 4;
14757 }
14758 pc_rel = 1;
14759 break;
14760 case T_MNEM_mov:
14761 case T_MNEM_movs:
14762 case T_MNEM_cmp:
14763 case T_MNEM_cmn:
14764 if (fragp->fr_var == 4)
14765 {
14766 int r0off = (opcode == T_MNEM_mov
14767 || opcode == T_MNEM_movs) ? 0 : 8;
14768 insn = THUMB_OP32 (opcode);
14769 insn = (insn & 0xe1ffffff) | 0x10000000;
14770 insn |= (old_op & 0x700) << r0off;
14771 put_thumb32_insn (buf, insn);
14772 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14773 }
14774 else
14775 {
14776 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14777 }
14778 pc_rel = 0;
14779 break;
14780 case T_MNEM_b:
14781 if (fragp->fr_var == 4)
14782 {
14783 insn = THUMB_OP32(opcode);
14784 put_thumb32_insn (buf, insn);
14785 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14786 }
14787 else
14788 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14789 pc_rel = 1;
14790 break;
14791 case T_MNEM_bcond:
14792 if (fragp->fr_var == 4)
14793 {
14794 insn = THUMB_OP32(opcode);
14795 insn |= (old_op & 0xf00) << 14;
14796 put_thumb32_insn (buf, insn);
14797 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14798 }
14799 else
14800 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14801 pc_rel = 1;
14802 break;
14803 case T_MNEM_add_sp:
14804 case T_MNEM_add_pc:
14805 case T_MNEM_inc_sp:
14806 case T_MNEM_dec_sp:
14807 if (fragp->fr_var == 4)
14808 {
14809 /* ??? Choose between add and addw. */
14810 insn = THUMB_OP32 (opcode);
14811 insn |= (old_op & 0xf0) << 4;
14812 put_thumb32_insn (buf, insn);
14813 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14814 }
14815 else
14816 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14817 pc_rel = 0;
14818 break;
14819
14820 case T_MNEM_addi:
14821 case T_MNEM_addis:
14822 case T_MNEM_subi:
14823 case T_MNEM_subis:
14824 if (fragp->fr_var == 4)
14825 {
14826 insn = THUMB_OP32 (opcode);
14827 insn |= (old_op & 0xf0) << 4;
14828 insn |= (old_op & 0xf) << 16;
14829 put_thumb32_insn (buf, insn);
14830 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14831 }
14832 else
14833 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14834 pc_rel = 0;
14835 break;
14836 default:
14837 abort();
14838 }
14839 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14840 reloc_type);
14841 fixp->fx_file = fragp->fr_file;
14842 fixp->fx_line = fragp->fr_line;
14843 fragp->fr_fix += fragp->fr_var;
14844 }
14845
14846 /* Return the size of a relaxable immediate operand instruction.
14847 SHIFT and SIZE specify the form of the allowable immediate. */
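/* For example, the Thumb word load/store form uses SIZE == 5 and
   SHIFT == 2: only word-aligned offsets in the range 0..124 keep the
   16-bit encoding; anything else forces the 32-bit form.  */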
14848 static int
14849 relax_immediate (fragS *fragp, int size, int shift)
14850 {
14851 offsetT offset;
14852 offsetT mask;
14853 offsetT low;
14854
14855 /* ??? Should be able to do better than this. */
14856 if (fragp->fr_symbol)
14857 return 4;
14858
14859 low = (1 << shift) - 1;
14860 mask = (1 << (shift + size)) - (1 << shift);
14861 offset = fragp->fr_offset;
14862 /* Force misaligned offsets to 32-bit variant. */
14863 if (offset & low)
14864 return -4;
14865 if (offset & ~mask)
14866 return 4;
14867 return 2;
14868 }
14869
14870 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14871 load. */
14872 static int
14873 relax_adr (fragS *fragp, asection *sec)
14874 {
14875 addressT addr;
14876 offsetT val;
14877
14878 /* Assume worst case for symbols not known to be in the same section. */
14879 if (!S_IS_DEFINED(fragp->fr_symbol)
14880 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14881 return 4;
14882
14883 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14884 addr = fragp->fr_address + fragp->fr_fix;
14885 addr = (addr + 4) & ~3;
14886 /* Fix the insn as the 4-byte version if the target address is not
14887      sufficiently aligned.  This prevents an infinite loop when two
14888 instructions have contradictory range/alignment requirements. */
14889 if (val & 3)
14890 return -4;
14891 val -= addr;
14892 if (val < 0 || val > 1020)
14893 return 4;
14894 return 2;
14895 }
14896
14897 /* Return the size of a relaxable add/sub immediate instruction. */
14898 static int
14899 relax_addsub (fragS *fragp, asection *sec)
14900 {
14901 char *buf;
14902 int op;
14903
14904 buf = fragp->fr_literal + fragp->fr_fix;
14905 op = bfd_get_16(sec->owner, buf);
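  /* The low two nibbles of the placeholder hold the two register operands
     (see the T_MNEM_addi handling in md_convert_frag).  When the
     destination and source registers match, the 8-bit immediate form of
     add/sub is available; otherwise only the 3-bit immediate form is.  */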
14906 if ((op & 0xf) == ((op >> 4) & 0xf))
14907 return relax_immediate (fragp, 8, 0);
14908 else
14909 return relax_immediate (fragp, 3, 0);
14910 }
14911
14912
14913 /* Return the size of a relaxable branch instruction. BITS is the
14914 size of the offset field in the narrow instruction. */
14915
14916 static int
14917 relax_branch (fragS *fragp, asection *sec, int bits)
14918 {
14919 addressT addr;
14920 offsetT val;
14921 offsetT limit;
14922
14923 /* Assume worst case for symbols not known to be in the same section. */
14924 if (!S_IS_DEFINED(fragp->fr_symbol)
14925 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14926 return 4;
14927
14928 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14929 addr = fragp->fr_address + fragp->fr_fix + 4;
14930 val -= addr;
14931
14932   /* The offset field holds a signed value in units of halfwords (the
	 encoded offset is multiplied by 2), so BITS bits give a reach of
	 roughly +/- (1 << BITS) bytes.  */
14933 limit = 1 << bits;
14934 if (val >= limit || val < -limit)
14935 return 4;
14936 return 2;
14937 }
14938
14939
14940 /* Relax a machine dependent frag. This returns the amount by which
14941 the current size of the frag should change. */
14942
14943 int
14944 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
14945 {
14946 int oldsize;
14947 int newsize;
14948
14949 oldsize = fragp->fr_var;
14950 switch (fragp->fr_subtype)
14951 {
14952 case T_MNEM_ldr_pc2:
14953 newsize = relax_adr(fragp, sec);
14954 break;
14955 case T_MNEM_ldr_pc:
14956 case T_MNEM_ldr_sp:
14957 case T_MNEM_str_sp:
14958 newsize = relax_immediate(fragp, 8, 2);
14959 break;
14960 case T_MNEM_ldr:
14961 case T_MNEM_str:
14962 newsize = relax_immediate(fragp, 5, 2);
14963 break;
14964 case T_MNEM_ldrh:
14965 case T_MNEM_strh:
14966 newsize = relax_immediate(fragp, 5, 1);
14967 break;
14968 case T_MNEM_ldrb:
14969 case T_MNEM_strb:
14970 newsize = relax_immediate(fragp, 5, 0);
14971 break;
14972 case T_MNEM_adr:
14973 newsize = relax_adr(fragp, sec);
14974 break;
14975 case T_MNEM_mov:
14976 case T_MNEM_movs:
14977 case T_MNEM_cmp:
14978 case T_MNEM_cmn:
14979 newsize = relax_immediate(fragp, 8, 0);
14980 break;
14981 case T_MNEM_b:
14982 newsize = relax_branch(fragp, sec, 11);
14983 break;
14984 case T_MNEM_bcond:
14985 newsize = relax_branch(fragp, sec, 8);
14986 break;
14987 case T_MNEM_add_sp:
14988 case T_MNEM_add_pc:
14989 newsize = relax_immediate (fragp, 8, 2);
14990 break;
14991 case T_MNEM_inc_sp:
14992 case T_MNEM_dec_sp:
14993 newsize = relax_immediate (fragp, 7, 2);
14994 break;
14995 case T_MNEM_addi:
14996 case T_MNEM_addis:
14997 case T_MNEM_subi:
14998 case T_MNEM_subis:
14999 newsize = relax_addsub (fragp, sec);
15000 break;
15001 default:
15002 abort();
15003 }
15004 if (newsize < 0)
15005 {
15006 fragp->fr_var = -newsize;
15007 md_convert_frag (sec->owner, sec, fragp);
15008 frag_wane(fragp);
15009 return -(newsize + oldsize);
15010 }
15011 fragp->fr_var = newsize;
15012 return newsize - oldsize;
15013 }
15014
15015 /* Round up a section size to the appropriate boundary. */
15016
15017 valueT
15018 md_section_align (segT segment ATTRIBUTE_UNUSED,
15019 valueT size)
15020 {
15021 #ifdef OBJ_ELF
15022 return size;
15023 #else
15024 /* Round all sects to multiple of 4. */
15025 return (size + 3) & ~3;
15026 #endif
15027 }
15028
15029 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
15030 of an rs_align_code fragment. */
15031
15032 void
15033 arm_handle_align (fragS * fragP)
15034 {
15035 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
15036 static char const thumb_noop[2] = { 0xc0, 0x46 };
15037 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
15038 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
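  /* These are the traditional ARM and Thumb no-ops, 0xe1a00000
     (mov r0, r0) and 0x46c0 (mov r8, r8), stored in both byte orders.  */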
15039
15040 int bytes, fix, noop_size;
15041 char * p;
15042 const char * noop;
15043
15044 if (fragP->fr_type != rs_align_code)
15045 return;
15046
15047 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
15048 p = fragP->fr_literal + fragP->fr_fix;
15049 fix = 0;
15050
15051 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
15052 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
15053
15054 if (fragP->tc_frag_data)
15055 {
15056 if (target_big_endian)
15057 noop = thumb_bigend_noop;
15058 else
15059 noop = thumb_noop;
15060 noop_size = sizeof (thumb_noop);
15061 }
15062 else
15063 {
15064 if (target_big_endian)
15065 noop = arm_bigend_noop;
15066 else
15067 noop = arm_noop;
15068 noop_size = sizeof (arm_noop);
15069 }
15070
15071 if (bytes & (noop_size - 1))
15072 {
15073 fix = bytes & (noop_size - 1);
15074 memset (p, 0, fix);
15075 p += fix;
15076 bytes -= fix;
15077 }
15078
15079 while (bytes >= noop_size)
15080 {
15081 memcpy (p, noop, noop_size);
15082 p += noop_size;
15083 bytes -= noop_size;
15084 fix += noop_size;
15085 }
15086
15087 fragP->fr_fix += fix;
15088 fragP->fr_var = noop_size;
15089 }
15090
15091 /* Called from md_do_align. Used to create an alignment
15092 frag in a code section. */
15093
15094 void
15095 arm_frag_align_code (int n, int max)
15096 {
15097 char * p;
15098
15099 /* We assume that there will never be a requirement
15100 to support alignments greater than 32 bytes. */
15101 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15102 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15103
15104 p = frag_var (rs_align_code,
15105 MAX_MEM_FOR_RS_ALIGN_CODE,
15106 1,
15107 (relax_substateT) max,
15108 (symbolS *) NULL,
15109 (offsetT) n,
15110 (char *) NULL);
15111 *p = 0;
15112 }
15113
15114 /* Perform target specific initialisation of a frag. */
15115
15116 void
15117 arm_init_frag (fragS * fragP)
15118 {
15119 /* Record whether this frag is in an ARM or a THUMB area. */
15120 fragP->tc_frag_data = thumb_mode;
15121 }
15122
15123 #ifdef OBJ_ELF
15124 /* When we change sections we need to issue a new mapping symbol. */
15125
15126 void
15127 arm_elf_change_section (void)
15128 {
15129 flagword flags;
15130 segment_info_type *seginfo;
15131
15132 /* Link an unlinked unwind index table section to the .text section. */
15133 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15134 && elf_linked_to_section (now_seg) == NULL)
15135 elf_linked_to_section (now_seg) = text_section;
15136
15137 if (!SEG_NORMAL (now_seg))
15138 return;
15139
15140 flags = bfd_get_section_flags (stdoutput, now_seg);
15141
15142 /* We can ignore sections that only contain debug info. */
15143 if ((flags & SEC_ALLOC) == 0)
15144 return;
15145
15146 seginfo = seg_info (now_seg);
15147 mapstate = seginfo->tc_segment_info_data.mapstate;
15148 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15149 }
15150
15151 int
15152 arm_elf_section_type (const char * str, size_t len)
15153 {
15154 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15155 return SHT_ARM_EXIDX;
15156
15157 return -1;
15158 }
15159 \f
15160 /* Code to deal with unwinding tables. */
15161
15162 static void add_unwind_adjustsp (offsetT);
15163
15164 /* Generate any deferred unwind frame offset.  */
15165
15166 static void
15167 flush_pending_unwind (void)
15168 {
15169 offsetT offset;
15170
15171 offset = unwind.pending_offset;
15172 unwind.pending_offset = 0;
15173 if (offset != 0)
15174 add_unwind_adjustsp (offset);
15175 }
15176
15177 /* Add an opcode to this list for this function. Two-byte opcodes should
15178 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15179 order. */
15180
15181 static void
15182 add_unwind_opcode (valueT op, int length)
15183 {
15184 /* Add any deferred stack adjustment. */
15185 if (unwind.pending_offset)
15186 flush_pending_unwind ();
15187
15188 unwind.sp_restored = 0;
15189
15190 if (unwind.opcode_count + length > unwind.opcode_alloc)
15191 {
15192 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15193 if (unwind.opcodes)
15194 unwind.opcodes = xrealloc (unwind.opcodes,
15195 unwind.opcode_alloc);
15196 else
15197 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15198 }
15199 while (length > 0)
15200 {
15201 length--;
15202 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15203 op >>= 8;
15204 unwind.opcode_count++;
15205 }
15206 }
15207
15208 /* Add unwind opcodes to adjust the stack pointer. */
15209
15210 static void
15211 add_unwind_adjustsp (offsetT offset)
15212 {
15213 valueT op;
15214
15215 if (offset > 0x200)
15216 {
15217 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15218 char bytes[5];
15219 int n;
15220 valueT o;
15221
15222 /* Long form: 0xb2, uleb128. */
15223 /* This might not fit in a word so add the individual bytes,
15224 remembering the list is built in reverse order. */
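      /* For example, an adjustment of 0x400 bytes gives
	 (0x400 - 0x204) >> 2 == 0x7f, a single uleb128 byte, so the final
	 (re-reversed) unwind sequence is 0xb2 0x7f.  */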
15225 o = (valueT) ((offset - 0x204) >> 2);
15226 if (o == 0)
15227 add_unwind_opcode (0, 1);
15228
15229 /* Calculate the uleb128 encoding of the offset. */
15230 n = 0;
15231 while (o)
15232 {
15233 bytes[n] = o & 0x7f;
15234 o >>= 7;
15235 if (o)
15236 bytes[n] |= 0x80;
15237 n++;
15238 }
15239 /* Add the insn. */
15240 for (; n; n--)
15241 add_unwind_opcode (bytes[n - 1], 1);
15242 add_unwind_opcode (0xb2, 1);
15243 }
15244 else if (offset > 0x100)
15245 {
15246 /* Two short opcodes. */
15247 add_unwind_opcode (0x3f, 1);
15248 op = (offset - 0x104) >> 2;
15249 add_unwind_opcode (op, 1);
15250 }
15251 else if (offset > 0)
15252 {
15253 /* Short opcode. */
15254 op = (offset - 4) >> 2;
15255 add_unwind_opcode (op, 1);
15256 }
15257 else if (offset < 0)
15258 {
15259 offset = -offset;
15260 while (offset > 0x100)
15261 {
15262 add_unwind_opcode (0x7f, 1);
15263 offset -= 0x100;
15264 }
15265 op = ((offset - 4) >> 2) | 0x40;
15266 add_unwind_opcode (op, 1);
15267 }
15268 }
15269
15270 /* Finish the list of unwind opcodes for this function. */
15271 static void
15272 finish_unwind_opcodes (void)
15273 {
15274 valueT op;
15275
15276 if (unwind.fp_used)
15277 {
15278 /* Adjust sp as necessary. */
15279 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15280 flush_pending_unwind ();
15281
15282       /* Emit the opcode that restores sp from the frame pointer register.  */
15283 op = 0x90 | unwind.fp_reg;
15284 add_unwind_opcode (op, 1);
15285 }
15286 else
15287 flush_pending_unwind ();
15288 }
15289
15290
15291 /* Start an exception table entry. If idx is nonzero this is an index table
15292 entry. */
15293
15294 static void
15295 start_unwind_section (const segT text_seg, int idx)
15296 {
15297 const char * text_name;
15298 const char * prefix;
15299 const char * prefix_once;
15300 const char * group_name;
15301 size_t prefix_len;
15302 size_t text_len;
15303 char * sec_name;
15304 size_t sec_name_len;
15305 int type;
15306 int flags;
15307 int linkonce;
15308
15309 if (idx)
15310 {
15311 prefix = ELF_STRING_ARM_unwind;
15312 prefix_once = ELF_STRING_ARM_unwind_once;
15313 type = SHT_ARM_EXIDX;
15314 }
15315 else
15316 {
15317 prefix = ELF_STRING_ARM_unwind_info;
15318 prefix_once = ELF_STRING_ARM_unwind_info_once;
15319 type = SHT_PROGBITS;
15320 }
15321
15322 text_name = segment_name (text_seg);
15323 if (streq (text_name, ".text"))
15324 text_name = "";
15325
15326 if (strncmp (text_name, ".gnu.linkonce.t.",
15327 strlen (".gnu.linkonce.t.")) == 0)
15328 {
15329 prefix = prefix_once;
15330 text_name += strlen (".gnu.linkonce.t.");
15331 }
15332
15333 prefix_len = strlen (prefix);
15334 text_len = strlen (text_name);
15335 sec_name_len = prefix_len + text_len;
15336 sec_name = xmalloc (sec_name_len + 1);
15337 memcpy (sec_name, prefix, prefix_len);
15338 memcpy (sec_name + prefix_len, text_name, text_len);
15339 sec_name[prefix_len + text_len] = '\0';
15340
15341 flags = SHF_ALLOC;
15342 linkonce = 0;
15343 group_name = 0;
15344
15345 /* Handle COMDAT group. */
15346 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15347 {
15348 group_name = elf_group_name (text_seg);
15349 if (group_name == NULL)
15350 {
15351 as_bad ("Group section `%s' has no group signature",
15352 segment_name (text_seg));
15353 ignore_rest_of_line ();
15354 return;
15355 }
15356 flags |= SHF_GROUP;
15357 linkonce = 1;
15358 }
15359
15360 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15361
15362   /* Set the section link for index tables.  */
15363 if (idx)
15364 elf_linked_to_section (now_seg) = text_seg;
15365 }
15366
15367
15368 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15369 personality routine data. Returns zero, or the index table value for
15370    an inline entry.  */
15371
15372 static valueT
15373 create_unwind_entry (int have_data)
15374 {
15375 int size;
15376 addressT where;
15377 char *ptr;
15378 /* The current word of data. */
15379 valueT data;
15380 /* The number of bytes left in this word. */
15381 int n;
15382
15383 finish_unwind_opcodes ();
15384
15385 /* Remember the current text section. */
15386 unwind.saved_seg = now_seg;
15387 unwind.saved_subseg = now_subseg;
15388
15389 start_unwind_section (now_seg, 0);
15390
15391 if (unwind.personality_routine == NULL)
15392 {
15393 if (unwind.personality_index == -2)
15394 {
15395 if (have_data)
15396 	    as_bad (_("handlerdata in cantunwind frame"));
15397 return 1; /* EXIDX_CANTUNWIND. */
15398 }
15399
15400 /* Use a default personality routine if none is specified. */
15401 if (unwind.personality_index == -1)
15402 {
15403 if (unwind.opcode_count > 3)
15404 unwind.personality_index = 1;
15405 else
15406 unwind.personality_index = 0;
15407 }
15408
15409 /* Space for the personality routine entry. */
15410 if (unwind.personality_index == 0)
15411 {
15412 if (unwind.opcode_count > 3)
15413 as_bad (_("too many unwind opcodes for personality routine 0"));
15414
15415 if (!have_data)
15416 {
15417 /* All the data is inline in the index table. */
15418 data = 0x80;
15419 n = 3;
15420 while (unwind.opcode_count > 0)
15421 {
15422 unwind.opcode_count--;
15423 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15424 n--;
15425 }
15426
15427 /* Pad with "finish" opcodes. */
15428 while (n--)
15429 data = (data << 8) | 0xb0;
15430
15431 return data;
15432 }
15433 size = 0;
15434 }
15435 else
15436 /* We get two opcodes "free" in the first word. */
15437 size = unwind.opcode_count - 2;
15438 }
15439 else
15440 /* An extra byte is required for the opcode count. */
15441 size = unwind.opcode_count + 1;
15442
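  /* Convert the byte count into a count of extra 32-bit words, rounding up.  */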
15443 size = (size + 3) >> 2;
15444 if (size > 0xff)
15445 as_bad (_("too many unwind opcodes"));
15446
15447 frag_align (2, 0, 0);
15448 record_alignment (now_seg, 2);
15449 unwind.table_entry = expr_build_dot ();
15450
15451 /* Allocate the table entry. */
15452 ptr = frag_more ((size << 2) + 4);
15453 where = frag_now_fix () - ((size << 2) + 4);
15454
15455 switch (unwind.personality_index)
15456 {
15457 case -1:
15458 /* ??? Should this be a PLT generating relocation? */
15459 /* Custom personality routine. */
15460 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15461 BFD_RELOC_ARM_PREL31);
15462
15463 where += 4;
15464 ptr += 4;
15465
15466 /* Set the first byte to the number of additional words. */
15467 data = size - 1;
15468 n = 3;
15469 break;
15470
15471 /* ABI defined personality routines. */
15472 case 0:
15473       /* Three opcode bytes are packed into the first word.  */
15474 data = 0x80;
15475 n = 3;
15476 break;
15477
15478 case 1:
15479 case 2:
15480 /* The size and first two opcode bytes go in the first word. */
15481 data = ((0x80 + unwind.personality_index) << 8) | size;
15482 n = 2;
15483 break;
15484
15485 default:
15486 /* Should never happen. */
15487 abort ();
15488 }
15489
15490 /* Pack the opcodes into words (MSB first), reversing the list at the same
15491 time. */
15492 while (unwind.opcode_count > 0)
15493 {
15494 if (n == 0)
15495 {
15496 md_number_to_chars (ptr, data, 4);
15497 ptr += 4;
15498 n = 4;
15499 data = 0;
15500 }
15501 unwind.opcode_count--;
15502 n--;
15503 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15504 }
15505
15506 /* Finish off the last word. */
15507 if (n < 4)
15508 {
15509 /* Pad with "finish" opcodes. */
15510 while (n--)
15511 data = (data << 8) | 0xb0;
15512
15513 md_number_to_chars (ptr, data, 4);
15514 }
15515
15516 if (!have_data)
15517 {
15518 /* Add an empty descriptor if there is no user-specified data. */
15519 ptr = frag_more (4);
15520 md_number_to_chars (ptr, 0, 4);
15521 }
15522
15523 return 0;
15524 }
15525
15526 /* Convert REGNAME to a DWARF-2 register number. */
15527
15528 int
15529 tc_arm_regname_to_dw2regnum (const char *regname)
15530 {
15531 int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN);
15532
15533 if (reg == FAIL)
15534 return -1;
15535
15536 return reg;
15537 }
15538
15539 /* Initialize the DWARF-2 unwind information for this procedure. */
15540
15541 void
15542 tc_arm_frame_initial_instructions (void)
15543 {
15544 cfi_add_CFA_def_cfa (REG_SP, 0);
15545 }
15546 #endif /* OBJ_ELF */
15547
15548
15549 /* MD interface: Symbol and relocation handling. */
15550
15551 /* Return the address within the segment that a PC-relative fixup is
15552 relative to. For ARM, PC-relative fixups applied to instructions
15553 are generally relative to the location of the fixup plus 8 bytes.
15554 Thumb branches are offset by 4, and Thumb loads relative to PC
15555 require special handling. */
15556
15557 long
15558 md_pcrel_from_section (fixS * fixP, segT seg)
15559 {
15560 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15561
15562 /* If this is pc-relative and we are going to emit a relocation
15563 then we just want to put out any pipeline compensation that the linker
15564 will need. Otherwise we want to use the calculated base.
15565 For WinCE we skip the bias for externals as well, since this
15566 is how the MS ARM-CE assembler behaves and we want to be compatible. */
15567 if (fixP->fx_pcrel
15568 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15569 || (arm_force_relocation (fixP)
15570 #ifdef TE_WINCE
15571 && !S_IS_EXTERNAL (fixP->fx_addsy)
15572 #endif
15573 )))
15574 base = 0;
15575
15576 switch (fixP->fx_r_type)
15577 {
15578 /* PC relative addressing on the Thumb is slightly odd as the
15579 bottom two bits of the PC are forced to zero for the
15580 calculation. This happens *after* application of the
15581 pipeline offset. However, Thumb adrl already adjusts for
15582 this, so we need not do it again. */
15583 case BFD_RELOC_ARM_THUMB_ADD:
15584 return base & ~3;
15585
15586 case BFD_RELOC_ARM_THUMB_OFFSET:
15587 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15588 case BFD_RELOC_ARM_T32_ADD_PC12:
15589 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15590 return (base + 4) & ~3;
15591
15592 /* Thumb branches are simply offset by +4. */
15593 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15594 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15595 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15596 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15597 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15598 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15599 case BFD_RELOC_THUMB_PCREL_BLX:
15600 return base + 4;
15601
15602 /* ARM mode branches are offset by +8. However, the Windows CE
15603 loader expects the relocation not to take this into account. */
15604 case BFD_RELOC_ARM_PCREL_BRANCH:
15605 case BFD_RELOC_ARM_PCREL_CALL:
15606 case BFD_RELOC_ARM_PCREL_JUMP:
15607 case BFD_RELOC_ARM_PCREL_BLX:
15608 case BFD_RELOC_ARM_PLT32:
15609 #ifdef TE_WINCE
15610       /* When a fixup is handled immediately, because we have already
15611 	 discovered the value of the symbol or the address of the frag involved,
15612 	 we must account for the +8 pipeline offset ourselves, as the OS loader
15613 	 will never see the reloc; see fixup_segment() in write.c.
15614 	 The S_IS_EXTERNAL test handles global symbols, which need the full
15615 	 calculated base, not just the pipeline compensation the linker will apply.  */
15616 if (fixP->fx_pcrel
15617 && fixP->fx_addsy != NULL
15618 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
15619 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
15620 return base + 8;
15621 return base;
15622 #else
15623 return base + 8;
15624 #endif
15625
15626 /* ARM mode loads relative to PC are also offset by +8. Unlike
15627 branches, the Windows CE loader *does* expect the relocation
15628 to take this into account. */
15629 case BFD_RELOC_ARM_OFFSET_IMM:
15630 case BFD_RELOC_ARM_OFFSET_IMM8:
15631 case BFD_RELOC_ARM_HWLITERAL:
15632 case BFD_RELOC_ARM_LITERAL:
15633 case BFD_RELOC_ARM_CP_OFF_IMM:
15634 return base + 8;
15635
15636
15637 /* Other PC-relative relocations are un-offset. */
15638 default:
15639 return base;
15640 }
15641 }
15642
15643 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15644 Otherwise we have no need to default values of symbols. */
15645
15646 symbolS *
15647 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15648 {
15649 #ifdef OBJ_ELF
15650 if (name[0] == '_' && name[1] == 'G'
15651 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15652 {
15653 if (!GOT_symbol)
15654 {
15655 if (symbol_find (name))
15656 as_bad ("GOT already in the symbol table");
15657
15658 GOT_symbol = symbol_new (name, undefined_section,
15659 (valueT) 0, & zero_address_frag);
15660 }
15661
15662 return GOT_symbol;
15663 }
15664 #endif
15665
15666 return 0;
15667 }
15668
15669 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15670 computed as two separate immediate values, added together. We
15671 already know that this value cannot be computed by just one ARM
15672 instruction. */
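/* For example, 0x1001 is not a valid ARM immediate, but it splits into the
   two encodable parts 0x1000 and 0x001, so an ADRL-style pair of ADD (or
   SUB) instructions can still synthesise it.  */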
15673
15674 static unsigned int
15675 validate_immediate_twopart (unsigned int val,
15676 unsigned int * highpart)
15677 {
15678 unsigned int a;
15679 unsigned int i;
15680
15681 for (i = 0; i < 32; i += 2)
15682 if (((a = rotate_left (val, i)) & 0xff) != 0)
15683 {
15684 if (a & 0xff00)
15685 {
15686 if (a & ~ 0xffff)
15687 continue;
15688 * highpart = (a >> 8) | ((i + 24) << 7);
15689 }
15690 else if (a & 0xff0000)
15691 {
15692 if (a & 0xff000000)
15693 continue;
15694 * highpart = (a >> 16) | ((i + 16) << 7);
15695 }
15696 else
15697 {
15698 assert (a & 0xff000000);
15699 * highpart = (a >> 24) | ((i + 8) << 7);
15700 }
15701
15702 return (a & 0xff) | (i << 7);
15703 }
15704
15705 return FAIL;
15706 }
15707
15708 static int
15709 validate_offset_imm (unsigned int val, int hwse)
15710 {
15711 if ((hwse && val > 255) || val > 4095)
15712 return FAIL;
15713 return val;
15714 }
15715
15716 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15717 negative immediate constant by altering the instruction. A bit of
15718 a hack really.
15719 MOV <-> MVN
15720 AND <-> BIC
15721 ADC <-> SBC
15722 by inverting the second operand, and
15723 ADD <-> SUB
15724 CMP <-> CMN
15725 by negating the second operand. */
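/* For example, "mov r0, #0xffffff00" has no valid immediate encoding, but
   the inverted constant 0xff does, so the fixup can rewrite the instruction
   as "mvn r0, #0xff".  */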
15726
15727 static int
15728 negate_data_op (unsigned long * instruction,
15729 unsigned long value)
15730 {
15731 int op, new_inst;
15732 unsigned long negated, inverted;
15733
15734 negated = encode_arm_immediate (-value);
15735 inverted = encode_arm_immediate (~value);
15736
15737 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15738 switch (op)
15739 {
15740 /* First negates. */
15741 case OPCODE_SUB: /* ADD <-> SUB */
15742 new_inst = OPCODE_ADD;
15743 value = negated;
15744 break;
15745
15746 case OPCODE_ADD:
15747 new_inst = OPCODE_SUB;
15748 value = negated;
15749 break;
15750
15751 case OPCODE_CMP: /* CMP <-> CMN */
15752 new_inst = OPCODE_CMN;
15753 value = negated;
15754 break;
15755
15756 case OPCODE_CMN:
15757 new_inst = OPCODE_CMP;
15758 value = negated;
15759 break;
15760
15761 /* Now Inverted ops. */
15762 case OPCODE_MOV: /* MOV <-> MVN */
15763 new_inst = OPCODE_MVN;
15764 value = inverted;
15765 break;
15766
15767 case OPCODE_MVN:
15768 new_inst = OPCODE_MOV;
15769 value = inverted;
15770 break;
15771
15772 case OPCODE_AND: /* AND <-> BIC */
15773 new_inst = OPCODE_BIC;
15774 value = inverted;
15775 break;
15776
15777 case OPCODE_BIC:
15778 new_inst = OPCODE_AND;
15779 value = inverted;
15780 break;
15781
15782 case OPCODE_ADC: /* ADC <-> SBC */
15783 new_inst = OPCODE_SBC;
15784 value = inverted;
15785 break;
15786
15787 case OPCODE_SBC:
15788 new_inst = OPCODE_ADC;
15789 value = inverted;
15790 break;
15791
15792 /* We cannot do anything. */
15793 default:
15794 return FAIL;
15795 }
15796
15797 if (value == (unsigned) FAIL)
15798 return FAIL;
15799
15800 *instruction &= OPCODE_MASK;
15801 *instruction |= new_inst << DATA_OP_SHIFT;
15802 return value;
15803 }
15804
15805 /* Like negate_data_op, but for Thumb-2. */
15806
15807 static unsigned int
15808 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15809 {
15810 int op, new_inst;
15811 int rd;
15812 offsetT negated, inverted;
15813
15814 negated = encode_thumb32_immediate (-value);
15815 inverted = encode_thumb32_immediate (~value);
15816
15817 rd = (*instruction >> 8) & 0xf;
15818 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15819 switch (op)
15820 {
15821 /* ADD <-> SUB. Includes CMP <-> CMN. */
15822 case T2_OPCODE_SUB:
15823 new_inst = T2_OPCODE_ADD;
15824 value = negated;
15825 break;
15826
15827 case T2_OPCODE_ADD:
15828 new_inst = T2_OPCODE_SUB;
15829 value = negated;
15830 break;
15831
15832 /* ORR <-> ORN. Includes MOV <-> MVN. */
15833 case T2_OPCODE_ORR:
15834 new_inst = T2_OPCODE_ORN;
15835 value = inverted;
15836 break;
15837
15838 case T2_OPCODE_ORN:
15839 new_inst = T2_OPCODE_ORR;
15840 value = inverted;
15841 break;
15842
15843 /* AND <-> BIC. TST has no inverted equivalent. */
15844 case T2_OPCODE_AND:
15845 new_inst = T2_OPCODE_BIC;
15846 if (rd == 15)
15847 value = FAIL;
15848 else
15849 value = inverted;
15850 break;
15851
15852 case T2_OPCODE_BIC:
15853 new_inst = T2_OPCODE_AND;
15854 value = inverted;
15855 break;
15856
15857 /* ADC <-> SBC */
15858 case T2_OPCODE_ADC:
15859 new_inst = T2_OPCODE_SBC;
15860 value = inverted;
15861 break;
15862
15863 case T2_OPCODE_SBC:
15864 new_inst = T2_OPCODE_ADC;
15865 value = inverted;
15866 break;
15867
15868 /* We cannot do anything. */
15869 default:
15870 return FAIL;
15871 }
15872
15873 if (value == FAIL)
15874 return FAIL;
15875
15876 *instruction &= T2_OPCODE_MASK;
15877 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15878 return value;
15879 }
15880
15881 /* Read a 32-bit thumb instruction from buf. */
15882 static unsigned long
15883 get_thumb32_insn (char * buf)
15884 {
15885 unsigned long insn;
15886 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15887 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15888
15889 return insn;
15890 }
15891
15892
15893 /* We usually want to set the low bit on the address of thumb function
15894 symbols. In particular .word foo - . should have the low bit set.
15895 Generic code tries to fold the difference of two symbols to
15896    a constant.  Prevent this and force a relocation when the first symbol
15897 is a thumb function. */
15898 int
15899 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15900 {
15901 if (op == O_subtract
15902 && l->X_op == O_symbol
15903 && r->X_op == O_symbol
15904 && THUMB_IS_FUNC (l->X_add_symbol))
15905 {
15906 l->X_op = O_subtract;
15907 l->X_op_symbol = r->X_add_symbol;
15908 l->X_add_number -= r->X_add_number;
15909 return 1;
15910 }
15911 /* Process as normal. */
15912 return 0;
15913 }
15914
15915 void
15916 md_apply_fix (fixS * fixP,
15917 valueT * valP,
15918 segT seg)
15919 {
15920 offsetT value = * valP;
15921 offsetT newval;
15922 unsigned int newimm;
15923 unsigned long temp;
15924 int sign;
15925 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15926
15927 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15928
15929 /* Note whether this will delete the relocation. */
15930 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
15931 fixP->fx_done = 1;
15932
15933 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15934 consistency with the behavior on 32-bit hosts. Remember value
15935 for emit_reloc. */
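  /* The xor/subtract pair below sign-extends bit 31, so the truncated value
     behaves as a signed 32-bit quantity on a 64-bit host.  */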
15936 value &= 0xffffffff;
15937 value ^= 0x80000000;
15938 value -= 0x80000000;
15939
15940 *valP = value;
15941 fixP->fx_addnumber = value;
15942
15943 /* Same treatment for fixP->fx_offset. */
15944 fixP->fx_offset &= 0xffffffff;
15945 fixP->fx_offset ^= 0x80000000;
15946 fixP->fx_offset -= 0x80000000;
15947
15948 switch (fixP->fx_r_type)
15949 {
15950 case BFD_RELOC_NONE:
15951 /* This will need to go in the object file. */
15952 fixP->fx_done = 0;
15953 break;
15954
15955 case BFD_RELOC_ARM_IMMEDIATE:
15956 /* We claim that this fixup has been processed here,
15957 even if in fact we generate an error because we do
15958 not have a reloc for it, so tc_gen_reloc will reject it. */
15959 fixP->fx_done = 1;
15960
15961 if (fixP->fx_addsy
15962 && ! S_IS_DEFINED (fixP->fx_addsy))
15963 {
15964 as_bad_where (fixP->fx_file, fixP->fx_line,
15965 _("undefined symbol %s used as an immediate value"),
15966 S_GET_NAME (fixP->fx_addsy));
15967 break;
15968 }
15969
15970 newimm = encode_arm_immediate (value);
15971 temp = md_chars_to_number (buf, INSN_SIZE);
15972
15973 /* If the instruction will fail, see if we can fix things up by
15974 changing the opcode. */
15975 if (newimm == (unsigned int) FAIL
15976 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
15977 {
15978 as_bad_where (fixP->fx_file, fixP->fx_line,
15979 _("invalid constant (%lx) after fixup"),
15980 (unsigned long) value);
15981 break;
15982 }
15983
15984 newimm |= (temp & 0xfffff000);
15985 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15986 break;
15987
15988 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
15989 {
15990 unsigned int highpart = 0;
15991 unsigned int newinsn = 0xe1a00000; /* nop. */
15992
15993 newimm = encode_arm_immediate (value);
15994 temp = md_chars_to_number (buf, INSN_SIZE);
15995
15996 /* If the instruction will fail, see if we can fix things up by
15997 changing the opcode. */
15998 if (newimm == (unsigned int) FAIL
15999 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
16000 {
16001 	      /* No?  OK - try using two ADD instructions to generate
16002 the value. */
16003 newimm = validate_immediate_twopart (value, & highpart);
16004
16005 /* Yes - then make sure that the second instruction is
16006 also an add. */
16007 if (newimm != (unsigned int) FAIL)
16008 newinsn = temp;
16009 	      /* Still no?  Try using a negated value.  */
16010 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
16011 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
16012 /* Otherwise - give up. */
16013 else
16014 {
16015 as_bad_where (fixP->fx_file, fixP->fx_line,
16016 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
16017 (long) value);
16018 break;
16019 }
16020
16021 /* Replace the first operand in the 2nd instruction (which
16022 is the PC) with the destination register. We have
16023 already added in the PC in the first instruction and we
16024 do not want to do it again. */
16025 newinsn &= ~ 0xf0000;
16026 newinsn |= ((newinsn & 0x0f000) << 4);
16027 }
16028
16029 newimm |= (temp & 0xfffff000);
16030 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16031
16032 highpart |= (newinsn & 0xfffff000);
16033 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
16034 }
16035 break;
16036
16037 case BFD_RELOC_ARM_OFFSET_IMM:
16038 if (!fixP->fx_done && seg->use_rela_p)
16039 value = 0;
16040
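      /* Fall through.  */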
16041 case BFD_RELOC_ARM_LITERAL:
16042 sign = value >= 0;
16043
16044 if (value < 0)
16045 value = - value;
16046
16047 if (validate_offset_imm (value, 0) == FAIL)
16048 {
16049 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
16050 as_bad_where (fixP->fx_file, fixP->fx_line,
16051 _("invalid literal constant: pool needs to be closer"));
16052 else
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("bad immediate value for offset (%ld)"),
16055 (long) value);
16056 break;
16057 }
16058
16059 newval = md_chars_to_number (buf, INSN_SIZE);
16060 newval &= 0xff7ff000;
16061 newval |= value | (sign ? INDEX_UP : 0);
16062 md_number_to_chars (buf, newval, INSN_SIZE);
16063 break;
16064
16065 case BFD_RELOC_ARM_OFFSET_IMM8:
16066 case BFD_RELOC_ARM_HWLITERAL:
16067 sign = value >= 0;
16068
16069 if (value < 0)
16070 value = - value;
16071
16072 if (validate_offset_imm (value, 1) == FAIL)
16073 {
16074 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
16075 as_bad_where (fixP->fx_file, fixP->fx_line,
16076 _("invalid literal constant: pool needs to be closer"));
16077 else
16078 as_bad (_("bad immediate value for half-word offset (%ld)"),
16079 (long) value);
16080 break;
16081 }
16082
16083 newval = md_chars_to_number (buf, INSN_SIZE);
16084 newval &= 0xff7ff0f0;
16085 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16086 md_number_to_chars (buf, newval, INSN_SIZE);
16087 break;
16088
16089 case BFD_RELOC_ARM_T32_OFFSET_U8:
16090 if (value < 0 || value > 1020 || value % 4 != 0)
16091 as_bad_where (fixP->fx_file, fixP->fx_line,
16092 _("bad immediate value for offset (%ld)"), (long) value);
16093 value /= 4;
16094
16095 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16096 newval |= value;
16097 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16098 break;
16099
16100 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16101 /* This is a complicated relocation used for all varieties of Thumb32
16102 load/store instruction with immediate offset:
16103
16104 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16105 *4, optional writeback(W)
16106 (doubleword load/store)
16107
16108 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16109 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16110 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16111 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16112 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16113
16114 Uppercase letters indicate bits that are already encoded at
16115 this point. Lowercase letters are our problem. For the
16116 second block of instructions, the secondary opcode nybble
16117 (bits 8..11) is present, and bit 23 is zero, even if this is
16118 a PC-relative operation. */
16119 newval = md_chars_to_number (buf, THUMB_SIZE);
16120 newval <<= 16;
16121 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16122
16123 if ((newval & 0xf0000000) == 0xe0000000)
16124 {
16125 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16126 if (value >= 0)
16127 newval |= (1 << 23);
16128 else
16129 value = -value;
16130 if (value % 4 != 0)
16131 {
16132 as_bad_where (fixP->fx_file, fixP->fx_line,
16133 _("offset not a multiple of 4"));
16134 break;
16135 }
16136 value /= 4;
16137 if (value > 0xff)
16138 {
16139 as_bad_where (fixP->fx_file, fixP->fx_line,
16140 _("offset out of range"));
16141 break;
16142 }
16143 newval &= ~0xff;
16144 }
16145 else if ((newval & 0x000f0000) == 0x000f0000)
16146 {
16147 /* PC-relative, 12-bit offset. */
16148 if (value >= 0)
16149 newval |= (1 << 23);
16150 else
16151 value = -value;
16152 if (value > 0xfff)
16153 {
16154 as_bad_where (fixP->fx_file, fixP->fx_line,
16155 _("offset out of range"));
16156 break;
16157 }
16158 newval &= ~0xfff;
16159 }
16160 else if ((newval & 0x00000100) == 0x00000100)
16161 {
16162 /* Writeback: 8-bit, +/- offset. */
16163 if (value >= 0)
16164 newval |= (1 << 9);
16165 else
16166 value = -value;
16167 if (value > 0xff)
16168 {
16169 as_bad_where (fixP->fx_file, fixP->fx_line,
16170 _("offset out of range"));
16171 break;
16172 }
16173 newval &= ~0xff;
16174 }
16175 else if ((newval & 0x00000f00) == 0x00000e00)
16176 {
16177 /* T-instruction: positive 8-bit offset. */
16178 if (value < 0 || value > 0xff)
16179 {
16180 as_bad_where (fixP->fx_file, fixP->fx_line,
16181 _("offset out of range"));
16182 break;
16183 }
16184 newval &= ~0xff;
16185 newval |= value;
16186 }
16187 else
16188 {
16189 /* Positive 12-bit or negative 8-bit offset. */
16190 int limit;
16191 if (value >= 0)
16192 {
16193 newval |= (1 << 23);
16194 limit = 0xfff;
16195 }
16196 else
16197 {
16198 value = -value;
16199 limit = 0xff;
16200 }
16201 if (value > limit)
16202 {
16203 as_bad_where (fixP->fx_file, fixP->fx_line,
16204 _("offset out of range"));
16205 break;
16206 }
16207 newval &= ~limit;
16208 }
16209
16210 newval |= value;
16211 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16212 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16213 break;
16214
16215 case BFD_RELOC_ARM_SHIFT_IMM:
16216 newval = md_chars_to_number (buf, INSN_SIZE);
16217 if (((unsigned long) value) > 32
16218 || (value == 32
16219 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16220 {
16221 as_bad_where (fixP->fx_file, fixP->fx_line,
16222 _("shift expression is too large"));
16223 break;
16224 }
16225
16226 if (value == 0)
16227 /* Shifts of zero must be done as lsl. */
16228 newval &= ~0x60;
16229 else if (value == 32)
16230 value = 0;
16231 newval &= 0xfffff07f;
16232 newval |= (value & 0x1f) << 7;
16233 md_number_to_chars (buf, newval, INSN_SIZE);
16234 break;
16235
16236 case BFD_RELOC_ARM_T32_IMMEDIATE:
16237 case BFD_RELOC_ARM_T32_IMM12:
16238 case BFD_RELOC_ARM_T32_ADD_PC12:
16239 /* Claim that this fixup has been processed here, even if in fact we
16240 end up reporting an error, because there is no reloc for it and
16241 tc_gen_reloc would otherwise reject it.  */
16242 fixP->fx_done = 1;
16243
16244 if (fixP->fx_addsy
16245 && ! S_IS_DEFINED (fixP->fx_addsy))
16246 {
16247 as_bad_where (fixP->fx_file, fixP->fx_line,
16248 _("undefined symbol %s used as an immediate value"),
16249 S_GET_NAME (fixP->fx_addsy));
16250 break;
16251 }
16252
16253 newval = md_chars_to_number (buf, THUMB_SIZE);
16254 newval <<= 16;
16255 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16256
16257 /* FUTURE: Implement analogue of negate_data_op for T32. */
16258 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16259 {
16260 newimm = encode_thumb32_immediate (value);
16261 if (newimm == (unsigned int) FAIL)
16262 newimm = thumb32_negate_data_op (&newval, value);
16263 }
16264 else
16265 {
16266 /* 12 bit immediate for addw/subw. */
16267 if (value < 0)
16268 {
16269 value = -value;
16270 newval ^= 0x00a00000;
16271 }
16272 if (value > 0xfff)
16273 newimm = (unsigned int) FAIL;
16274 else
16275 newimm = value;
16276 }
16277
16278 if (newimm == (unsigned int)FAIL)
16279 {
16280 as_bad_where (fixP->fx_file, fixP->fx_line,
16281 _("invalid constant (%lx) after fixup"),
16282 (unsigned long) value);
16283 break;
16284 }
16285
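/* Repack the result: NEWIMM is a 12-bit i:imm3:imm8 quantity, split
across the two halfwords as bit 26 (i), bits 14..12 (imm3) and
bits 7..0 (imm8) of the combined 32-bit instruction.  */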
16286 newval |= (newimm & 0x800) << 15;
16287 newval |= (newimm & 0x700) << 4;
16288 newval |= (newimm & 0x0ff);
16289
16290 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16291 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16292 break;
16293
16294 case BFD_RELOC_ARM_SMC:
16295 if (((unsigned long) value) > 0xffff)
16296 as_bad_where (fixP->fx_file, fixP->fx_line,
16297 _("invalid smc expression"));
16298 newval = md_chars_to_number (buf, INSN_SIZE);
16299 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16300 md_number_to_chars (buf, newval, INSN_SIZE);
16301 break;
16302
16303 case BFD_RELOC_ARM_SWI:
16304 if (fixP->tc_fix_data != 0)
16305 {
16306 if (((unsigned long) value) > 0xff)
16307 as_bad_where (fixP->fx_file, fixP->fx_line,
16308 _("invalid swi expression"));
16309 newval = md_chars_to_number (buf, THUMB_SIZE);
16310 newval |= value;
16311 md_number_to_chars (buf, newval, THUMB_SIZE);
16312 }
16313 else
16314 {
16315 if (((unsigned long) value) > 0x00ffffff)
16316 as_bad_where (fixP->fx_file, fixP->fx_line,
16317 _("invalid swi expression"));
16318 newval = md_chars_to_number (buf, INSN_SIZE);
16319 newval |= value;
16320 md_number_to_chars (buf, newval, INSN_SIZE);
16321 }
16322 break;
16323
16324 case BFD_RELOC_ARM_MULTI:
16325 if (((unsigned long) value) > 0xffff)
16326 as_bad_where (fixP->fx_file, fixP->fx_line,
16327 _("invalid expression in load/store multiple"));
16328 newval = value | md_chars_to_number (buf, INSN_SIZE);
16329 md_number_to_chars (buf, newval, INSN_SIZE);
16330 break;
16331
16332 #ifdef OBJ_ELF
16333 case BFD_RELOC_ARM_PCREL_CALL:
16334 newval = md_chars_to_number (buf, INSN_SIZE);
16335 if ((newval & 0xf0000000) == 0xf0000000)
16336 temp = 1;
16337 else
16338 temp = 3;
16339 goto arm_branch_common;
16340
16341 case BFD_RELOC_ARM_PCREL_JUMP:
16342 case BFD_RELOC_ARM_PLT32:
16343 #endif
16344 case BFD_RELOC_ARM_PCREL_BRANCH:
16345 temp = 3;
16346 goto arm_branch_common;
16347
16348 case BFD_RELOC_ARM_PCREL_BLX:
16349 temp = 1;
16350 arm_branch_common:
16351 /* We are going to store value (shifted right by two) in the
16352 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16353 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16354 also be clear.  */
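/* Note that TEMP also serves as the misalignment mask tested just
below: 1 for BLX (where bit 1 of the offset becomes the H bit) and
3 for B/BL.  */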
16355 if (value & temp)
16356 as_bad_where (fixP->fx_file, fixP->fx_line,
16357 _("misaligned branch destination"));
16358 if ((value & (offsetT)0xfe000000) != (offsetT)0
16359 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16360 as_bad_where (fixP->fx_file, fixP->fx_line,
16361 _("branch out of range"));
16362
16363 if (fixP->fx_done || !seg->use_rela_p)
16364 {
16365 newval = md_chars_to_number (buf, INSN_SIZE);
16366 newval |= (value >> 2) & 0x00ffffff;
16367 /* Set the H bit on BLX instructions. */
16368 if (temp == 1)
16369 {
16370 if (value & 2)
16371 newval |= 0x01000000;
16372 else
16373 newval &= ~0x01000000;
16374 }
16375 md_number_to_chars (buf, newval, INSN_SIZE);
16376 }
16377 break;
16378
16379 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
16380 /* CBZ can only branch forward. */
16381 if (value & ~0x7e)
16382 as_bad_where (fixP->fx_file, fixP->fx_line,
16383 _("branch out of range"));
16384
16385 if (fixP->fx_done || !seg->use_rela_p)
16386 {
16387 newval = md_chars_to_number (buf, THUMB_SIZE);
16388 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16389 md_number_to_chars (buf, newval, THUMB_SIZE);
16390 }
16391 break;
16392
16393 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16394 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16395 as_bad_where (fixP->fx_file, fixP->fx_line,
16396 _("branch out of range"));
16397
16398 if (fixP->fx_done || !seg->use_rela_p)
16399 {
16400 newval = md_chars_to_number (buf, THUMB_SIZE);
16401 newval |= (value & 0x1ff) >> 1;
16402 md_number_to_chars (buf, newval, THUMB_SIZE);
16403 }
16404 break;
16405
16406 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16407 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16408 as_bad_where (fixP->fx_file, fixP->fx_line,
16409 _("branch out of range"));
16410
16411 if (fixP->fx_done || !seg->use_rela_p)
16412 {
16413 newval = md_chars_to_number (buf, THUMB_SIZE);
16414 newval |= (value & 0xfff) >> 1;
16415 md_number_to_chars (buf, newval, THUMB_SIZE);
16416 }
16417 break;
16418
16419 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16420 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16421 as_bad_where (fixP->fx_file, fixP->fx_line,
16422 _("conditional branch out of range"));
16423
16424 if (fixP->fx_done || !seg->use_rela_p)
16425 {
16426 offsetT newval2;
16427 addressT S, J1, J2, lo, hi;
16428
16429 S = (value & 0x00100000) >> 20;
16430 J2 = (value & 0x00080000) >> 19;
16431 J1 = (value & 0x00040000) >> 18;
16432 hi = (value & 0x0003f000) >> 12;
16433 lo = (value & 0x00000ffe) >> 1;
16434
16435 newval = md_chars_to_number (buf, THUMB_SIZE);
16436 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16437 newval |= (S << 10) | hi;
16438 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16439 md_number_to_chars (buf, newval, THUMB_SIZE);
16440 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16441 }
16442 break;
16443
16444 case BFD_RELOC_THUMB_PCREL_BLX:
16445 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16446 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16447 as_bad_where (fixP->fx_file, fixP->fx_line,
16448 _("branch out of range"));
16449
16450 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16451 /* For a BLX instruction, make sure that the relocation is rounded up
16452 to a word boundary. This follows the semantics of the instruction
16453 which specifies that bit 1 of the target address will come from bit
16454 1 of the base address. */
16455 value = (value + 1) & ~ 1;
16456
16457 if (fixP->fx_done || !seg->use_rela_p)
16458 {
16459 offsetT newval2;
16460
16461 newval = md_chars_to_number (buf, THUMB_SIZE);
16462 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16463 newval |= (value & 0x7fffff) >> 12;
16464 newval2 |= (value & 0xfff) >> 1;
16465 md_number_to_chars (buf, newval, THUMB_SIZE);
16466 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16467 }
16468 break;
16469
16470 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16471 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16472 as_bad_where (fixP->fx_file, fixP->fx_line,
16473 _("branch out of range"));
16474
16475 if (fixP->fx_done || !seg->use_rela_p)
16476 {
16477 offsetT newval2;
16478 addressT S, I1, I2, lo, hi;
16479
16480 S = (value & 0x01000000) >> 24;
16481 I1 = (value & 0x00800000) >> 23;
16482 I2 = (value & 0x00400000) >> 22;
16483 hi = (value & 0x003ff000) >> 12;
16484 lo = (value & 0x00000ffe) >> 1;
16485
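/* The encoding stores J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S);
compute those here before merging them into the second halfword.  */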
16486 I1 = !(I1 ^ S);
16487 I2 = !(I2 ^ S);
16488
16489 newval = md_chars_to_number (buf, THUMB_SIZE);
16490 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16491 newval |= (S << 10) | hi;
16492 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16493 md_number_to_chars (buf, newval, THUMB_SIZE);
16494 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16495 }
16496 break;
16497
16498 case BFD_RELOC_8:
16499 if (fixP->fx_done || !seg->use_rela_p)
16500 md_number_to_chars (buf, value, 1);
16501 break;
16502
16503 case BFD_RELOC_16:
16504 if (fixP->fx_done || !seg->use_rela_p)
16505 md_number_to_chars (buf, value, 2);
16506 break;
16507
16508 #ifdef OBJ_ELF
16509 case BFD_RELOC_ARM_TLS_GD32:
16510 case BFD_RELOC_ARM_TLS_LE32:
16511 case BFD_RELOC_ARM_TLS_IE32:
16512 case BFD_RELOC_ARM_TLS_LDM32:
16513 case BFD_RELOC_ARM_TLS_LDO32:
16514 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16515 /* fall through */
16516
16517 case BFD_RELOC_ARM_GOT32:
16518 case BFD_RELOC_ARM_GOTOFF:
16519 case BFD_RELOC_ARM_TARGET2:
16520 if (fixP->fx_done || !seg->use_rela_p)
16521 md_number_to_chars (buf, 0, 4);
16522 break;
16523 #endif
16524
16525 case BFD_RELOC_RVA:
16526 case BFD_RELOC_32:
16527 case BFD_RELOC_ARM_TARGET1:
16528 case BFD_RELOC_ARM_ROSEGREL32:
16529 case BFD_RELOC_ARM_SBREL32:
16530 case BFD_RELOC_32_PCREL:
16531 if (fixP->fx_done || !seg->use_rela_p)
16532 #ifdef TE_WINCE
16533 /* For WinCE we only do this for pcrel fixups. */
16534 if (fixP->fx_done || fixP->fx_pcrel)
16535 #endif
16536 md_number_to_chars (buf, value, 4);
16537 break;
16538
16539 #ifdef OBJ_ELF
16540 case BFD_RELOC_ARM_PREL31:
16541 if (fixP->fx_done || !seg->use_rela_p)
16542 {
16543 newval = md_chars_to_number (buf, 4) & 0x80000000;
16544 if ((value ^ (value >> 1)) & 0x40000000)
16545 {
16546 as_bad_where (fixP->fx_file, fixP->fx_line,
16547 _("rel31 relocation overflow"));
16548 }
16549 newval |= value & 0x7fffffff;
16550 md_number_to_chars (buf, newval, 4);
16551 }
16552 break;
16553 #endif
16554
16555 case BFD_RELOC_ARM_CP_OFF_IMM:
16556 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16557 if (value < -1023 || value > 1023 || (value & 3))
16558 as_bad_where (fixP->fx_file, fixP->fx_line,
16559 _("co-processor offset out of range"));
16560 cp_off_common:
16561 sign = value >= 0;
16562 if (value < 0)
16563 value = -value;
16564 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16565 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16566 newval = md_chars_to_number (buf, INSN_SIZE);
16567 else
16568 newval = get_thumb32_insn (buf);
16569 newval &= 0xff7fff00;
16570 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16571 if (value == 0)
16572 newval &= ~WRITE_BACK;
16573 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16574 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16575 md_number_to_chars (buf, newval, INSN_SIZE);
16576 else
16577 put_thumb32_insn (buf, newval);
16578 break;
16579
16580 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16581 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16582 if (value < -255 || value > 255)
16583 as_bad_where (fixP->fx_file, fixP->fx_line,
16584 _("co-processor offset out of range"));
16585 value *= 4;
16586 goto cp_off_common;
16587
16588 case BFD_RELOC_ARM_THUMB_OFFSET:
16589 newval = md_chars_to_number (buf, THUMB_SIZE);
16590 /* Exactly what ranges, and where the offset is inserted, depends
16591 on the type of instruction; we can establish this from the
16592 top 4 bits.  */
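/* For example (illustrative): "ldr r3, [pc, #40]" has top nibble 4,
"str r3, [sp, #12]" has top nibble 9, and the immediate-offset word,
byte and halfword forms give 6, 7 and 8 respectively.  */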
16593 switch (newval >> 12)
16594 {
16595 case 4: /* PC load. */
16596 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
16597 forced to zero for these loads; md_pcrel_from has already
16598 compensated for this. */
16599 if (value & 3)
16600 as_bad_where (fixP->fx_file, fixP->fx_line,
16601 _("invalid offset, target not word aligned (0x%08lX)"),
16602 (((unsigned long) fixP->fx_frag->fr_address
16603 + (unsigned long) fixP->fx_where) & ~3)
16604 + (unsigned long) value);
16605
16606 if (value & ~0x3fc)
16607 as_bad_where (fixP->fx_file, fixP->fx_line,
16608 _("invalid offset, value too big (0x%08lX)"),
16609 (long) value);
16610
16611 newval |= value >> 2;
16612 break;
16613
16614 case 9: /* SP load/store. */
16615 if (value & ~0x3fc)
16616 as_bad_where (fixP->fx_file, fixP->fx_line,
16617 _("invalid offset, value too big (0x%08lX)"),
16618 (long) value);
16619 newval |= value >> 2;
16620 break;
16621
16622 case 6: /* Word load/store. */
16623 if (value & ~0x7c)
16624 as_bad_where (fixP->fx_file, fixP->fx_line,
16625 _("invalid offset, value too big (0x%08lX)"),
16626 (long) value);
16627 newval |= value << 4; /* 6 - 2. */
16628 break;
16629
16630 case 7: /* Byte load/store. */
16631 if (value & ~0x1f)
16632 as_bad_where (fixP->fx_file, fixP->fx_line,
16633 _("invalid offset, value too big (0x%08lX)"),
16634 (long) value);
16635 newval |= value << 6;
16636 break;
16637
16638 case 8: /* Halfword load/store. */
16639 if (value & ~0x3e)
16640 as_bad_where (fixP->fx_file, fixP->fx_line,
16641 _("invalid offset, value too big (0x%08lX)"),
16642 (long) value);
16643 newval |= value << 5; /* 6 - 1. */
16644 break;
16645
16646 default:
16647 as_bad_where (fixP->fx_file, fixP->fx_line,
16648 "Unable to process relocation for thumb opcode: %lx",
16649 (unsigned long) newval);
16650 break;
16651 }
16652 md_number_to_chars (buf, newval, THUMB_SIZE);
16653 break;
16654
16655 case BFD_RELOC_ARM_THUMB_ADD:
16656 /* This is a complicated relocation, since we use it for all of
16657 the following immediate relocations:
16658
16659 3bit ADD/SUB
16660 8bit ADD/SUB
16661 9bit ADD/SUB SP word-aligned
16662 10bit ADD PC/SP word-aligned
16663
16664 The type of instruction being processed is encoded in the
16665 instruction field:
16666
16667 0x8000 SUB
16668 0x00F0 Rd
16669 0x000F Rs
16670 */
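/* Roughly, the cases below correspond to (illustrative only):
"add sp, #imm"       -> 9-bit, word-aligned (Rd == SP)
"add r1, pc, #imm"   -> 10-bit, word-aligned (Rs == PC or SP)
"add r1, #imm"       -> 8-bit (Rd == Rs)
"add r1, r2, #imm"   -> 3-bit (everything else).  */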
16671 newval = md_chars_to_number (buf, THUMB_SIZE);
16672 {
16673 int rd = (newval >> 4) & 0xf;
16674 int rs = newval & 0xf;
16675 int subtract = !!(newval & 0x8000);
16676
16677 /* Check for HI regs, only very restricted cases allowed:
16678 Adjusting SP, and using PC or SP to get an address. */
16679 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16680 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16681 as_bad_where (fixP->fx_file, fixP->fx_line,
16682 _("invalid Hi register with immediate"));
16683
16684 /* If value is negative, choose the opposite instruction. */
16685 if (value < 0)
16686 {
16687 value = -value;
16688 subtract = !subtract;
16689 if (value < 0)
16690 as_bad_where (fixP->fx_file, fixP->fx_line,
16691 _("immediate value out of range"));
16692 }
16693
16694 if (rd == REG_SP)
16695 {
16696 if (value & ~0x1fc)
16697 as_bad_where (fixP->fx_file, fixP->fx_line,
16698 _("invalid immediate for stack address calculation"));
16699 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16700 newval |= value >> 2;
16701 }
16702 else if (rs == REG_PC || rs == REG_SP)
16703 {
16704 if (subtract || value & ~0x3fc)
16705 as_bad_where (fixP->fx_file, fixP->fx_line,
16706 _("invalid immediate for address calculation (value = 0x%08lX)"),
16707 (unsigned long) value);
16708 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16709 newval |= rd << 8;
16710 newval |= value >> 2;
16711 }
16712 else if (rs == rd)
16713 {
16714 if (value & ~0xff)
16715 as_bad_where (fixP->fx_file, fixP->fx_line,
16716 _("immediate value out of range"));
16717 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16718 newval |= (rd << 8) | value;
16719 }
16720 else
16721 {
16722 if (value & ~0x7)
16723 as_bad_where (fixP->fx_file, fixP->fx_line,
16724 _("immediate value out of range"));
16725 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16726 newval |= rd | (rs << 3) | (value << 6);
16727 }
16728 }
16729 md_number_to_chars (buf, newval, THUMB_SIZE);
16730 break;
16731
16732 case BFD_RELOC_ARM_THUMB_IMM:
16733 newval = md_chars_to_number (buf, THUMB_SIZE);
16734 if (value < 0 || value > 255)
16735 as_bad_where (fixP->fx_file, fixP->fx_line,
16736 _("invalid immediate: %ld is too large"),
16737 (long) value);
16738 newval |= value;
16739 md_number_to_chars (buf, newval, THUMB_SIZE);
16740 break;
16741
16742 case BFD_RELOC_ARM_THUMB_SHIFT:
16743 /* 5bit shift value (0..32). LSL cannot take 32. */
16744 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16745 temp = newval & 0xf800;
16746 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16747 as_bad_where (fixP->fx_file, fixP->fx_line,
16748 _("invalid shift value: %ld"), (long) value);
16749 /* Shifts of zero must be encoded as LSL. */
16750 if (value == 0)
16751 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16752 /* Shifts of 32 are encoded as zero. */
16753 else if (value == 32)
16754 value = 0;
16755 newval |= value << 6;
16756 md_number_to_chars (buf, newval, THUMB_SIZE);
16757 break;
16758
16759 case BFD_RELOC_VTABLE_INHERIT:
16760 case BFD_RELOC_VTABLE_ENTRY:
16761 fixP->fx_done = 0;
16762 return;
16763
16764 case BFD_RELOC_UNUSED:
16765 default:
16766 as_bad_where (fixP->fx_file, fixP->fx_line,
16767 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16768 }
16769 }
16770
16771 /* Translate internal representation of relocation info to BFD target
16772 format. */
16773
16774 arelent *
16775 tc_gen_reloc (asection *section, fixS *fixp)
16776 {
16777 arelent * reloc;
16778 bfd_reloc_code_real_type code;
16779
16780 reloc = xmalloc (sizeof (arelent));
16781
16782 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16783 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16784 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16785
16786 if (fixp->fx_pcrel)
16787 {
16788 if (section->use_rela_p)
16789 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16790 else
16791 fixp->fx_offset = reloc->address;
16792 }
16793 reloc->addend = fixp->fx_offset;
16794
16795 switch (fixp->fx_r_type)
16796 {
16797 case BFD_RELOC_8:
16798 if (fixp->fx_pcrel)
16799 {
16800 code = BFD_RELOC_8_PCREL;
16801 break;
16802 }
16803
16804 case BFD_RELOC_16:
16805 if (fixp->fx_pcrel)
16806 {
16807 code = BFD_RELOC_16_PCREL;
16808 break;
16809 }
16810
16811 case BFD_RELOC_32:
16812 if (fixp->fx_pcrel)
16813 {
16814 code = BFD_RELOC_32_PCREL;
16815 break;
16816 }
16817
16818 case BFD_RELOC_NONE:
16819 case BFD_RELOC_ARM_PCREL_BRANCH:
16820 case BFD_RELOC_ARM_PCREL_BLX:
16821 case BFD_RELOC_RVA:
16822 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16823 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16824 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16825 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16826 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16827 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16828 case BFD_RELOC_THUMB_PCREL_BLX:
16829 case BFD_RELOC_VTABLE_ENTRY:
16830 case BFD_RELOC_VTABLE_INHERIT:
16831 code = fixp->fx_r_type;
16832 break;
16833
16834 case BFD_RELOC_ARM_LITERAL:
16835 case BFD_RELOC_ARM_HWLITERAL:
16836 /* If this is called then a literal has been
16837 referenced across a section boundary. */
16838 as_bad_where (fixp->fx_file, fixp->fx_line,
16839 _("literal referenced across section boundary"));
16840 return NULL;
16841
16842 #ifdef OBJ_ELF
16843 case BFD_RELOC_ARM_GOT32:
16844 case BFD_RELOC_ARM_GOTOFF:
16845 case BFD_RELOC_ARM_PLT32:
16846 case BFD_RELOC_ARM_TARGET1:
16847 case BFD_RELOC_ARM_ROSEGREL32:
16848 case BFD_RELOC_ARM_SBREL32:
16849 case BFD_RELOC_ARM_PREL31:
16850 case BFD_RELOC_ARM_TARGET2:
16851 case BFD_RELOC_ARM_TLS_LE32:
16852 case BFD_RELOC_ARM_TLS_LDO32:
16853 case BFD_RELOC_ARM_PCREL_CALL:
16854 case BFD_RELOC_ARM_PCREL_JUMP:
16855 code = fixp->fx_r_type;
16856 break;
16857
16858 case BFD_RELOC_ARM_TLS_GD32:
16859 case BFD_RELOC_ARM_TLS_IE32:
16860 case BFD_RELOC_ARM_TLS_LDM32:
16861 /* BFD will include the symbol's address in the addend.
16862 But we don't want that, so subtract it out again here. */
16863 if (!S_IS_COMMON (fixp->fx_addsy))
16864 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
16865 code = fixp->fx_r_type;
16866 break;
16867 #endif
16868
16869 case BFD_RELOC_ARM_IMMEDIATE:
16870 as_bad_where (fixp->fx_file, fixp->fx_line,
16871 _("internal relocation (type: IMMEDIATE) not fixed up"));
16872 return NULL;
16873
16874 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16875 as_bad_where (fixp->fx_file, fixp->fx_line,
16876 _("ADRL used for a symbol not defined in the same file"));
16877 return NULL;
16878
16879 case BFD_RELOC_ARM_OFFSET_IMM:
16880 if (section->use_rela_p)
16881 {
16882 code = fixp->fx_r_type;
16883 break;
16884 }
16885
16886 if (fixp->fx_addsy != NULL
16887 && !S_IS_DEFINED (fixp->fx_addsy)
16888 && S_IS_LOCAL (fixp->fx_addsy))
16889 {
16890 as_bad_where (fixp->fx_file, fixp->fx_line,
16891 _("undefined local label `%s'"),
16892 S_GET_NAME (fixp->fx_addsy));
16893 return NULL;
16894 }
16895
16896 as_bad_where (fixp->fx_file, fixp->fx_line,
16897 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
16898 return NULL;
16899
16900 default:
16901 {
16902 char * type;
16903
16904 switch (fixp->fx_r_type)
16905 {
16906 case BFD_RELOC_NONE: type = "NONE"; break;
16907 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
16908 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
16909 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
16910 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
16911 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
16912 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
16913 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
16914 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
16915 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
16916 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
16917 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
16918 default: type = _("<unknown>"); break;
16919 }
16920 as_bad_where (fixp->fx_file, fixp->fx_line,
16921 _("cannot represent %s relocation in this object file format"),
16922 type);
16923 return NULL;
16924 }
16925 }
16926
16927 #ifdef OBJ_ELF
16928 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
16929 && GOT_symbol
16930 && fixp->fx_addsy == GOT_symbol)
16931 {
16932 code = BFD_RELOC_ARM_GOTPC;
16933 reloc->addend = fixp->fx_offset = reloc->address;
16934 }
16935 #endif
16936
16937 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
16938
16939 if (reloc->howto == NULL)
16940 {
16941 as_bad_where (fixp->fx_file, fixp->fx_line,
16942 _("cannot represent %s relocation in this object file format"),
16943 bfd_get_reloc_code_name (code));
16944 return NULL;
16945 }
16946
16947 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16948 vtable entry to be used in the relocation's section offset. */
16949 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16950 reloc->address = fixp->fx_offset;
16951
16952 return reloc;
16953 }
16954
16955 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16956
16957 void
16958 cons_fix_new_arm (fragS * frag,
16959 int where,
16960 int size,
16961 expressionS * exp)
16962 {
16963 bfd_reloc_code_real_type type;
16964 int pcrel = 0;
16965
16966 /* Pick a reloc.
16967 FIXME: @@ Should look at CPU word size. */
16968 switch (size)
16969 {
16970 case 1:
16971 type = BFD_RELOC_8;
16972 break;
16973 case 2:
16974 type = BFD_RELOC_16;
16975 break;
16976 case 4:
16977 default:
16978 type = BFD_RELOC_32;
16979 break;
16980 case 8:
16981 type = BFD_RELOC_64;
16982 break;
16983 }
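/* Note that any size other than 1, 2 or 8 falls into the
4-byte/BFD_RELOC_32 default above.  */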
16984
16985 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
16986 }
16987
16988 #if defined OBJ_COFF || defined OBJ_ELF
16989 void
16990 arm_validate_fix (fixS * fixP)
16991 {
16992 /* If the destination of the branch is a defined symbol which does not have
16993 the THUMB_FUNC attribute, then we must be calling a function which has
16994 the (interfacearm) attribute. We look for the Thumb entry point to that
16995 function and change the branch to refer to that function instead. */
16996 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
16997 && fixP->fx_addsy != NULL
16998 && S_IS_DEFINED (fixP->fx_addsy)
16999 && ! THUMB_IS_FUNC (fixP->fx_addsy))
17000 {
17001 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
17002 }
17003 }
17004 #endif
17005
17006 int
17007 arm_force_relocation (struct fix * fixp)
17008 {
17009 #if defined (OBJ_COFF) && defined (TE_PE)
17010 if (fixp->fx_r_type == BFD_RELOC_RVA)
17011 return 1;
17012 #endif
17013
17014 /* Resolve these relocations even if the symbol is extern or weak. */
17015 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
17016 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
17017 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
17018 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17019 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
17020 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
17021 return 0;
17022
17023 return generic_force_reloc (fixp);
17024 }
17025
17026 #ifdef OBJ_COFF
17027 bfd_boolean
17028 arm_fix_adjustable (fixS * fixP)
17029 {
17030 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
17031 local labels from being added to the output symbol table when they
17032 are used with the ADRL pseudo op. The ADRL relocation should always
17033 be resolved before the binary is emitted, so it is safe to say that
17034 it is adjustable. */
17035 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
17036 return 1;
17037
17038 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
17039 to be cloned, and without this test relocs would still be generated
17040 against the original, pre-cloned symbol. Such symbols would not appear
17041 in the symbol table however, and so a valid reloc could not be
17042 generated. So check to see if the fixup is against a symbol which has
17043 been removed from the symbol chain, and if it is, then allow it to be
17044 adjusted into a reloc against a section symbol. */
17045 if (fixP->fx_addsy != NULL
17046 && ! S_IS_LOCAL (fixP->fx_addsy)
17047 && symbol_next (fixP->fx_addsy) == NULL
17048 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
17049 return 1;
17050
17051 return 0;
17052 }
17053 #endif
17054
17055 #ifdef OBJ_ELF
17056 /* Relocations against Thumb function names must be left unadjusted,
17057 so that the linker can use this information to correctly set the
17058 bottom bit of their addresses. The MIPS version of this function
17059 also prevents relocations that are mips-16 specific, but I do not
17060 know why it does this.
17061
17062 FIXME:
17063 There is one other problem that ought to be addressed here, but
17064 which currently is not: Taking the address of a label (rather
17065 than a function) and then later jumping to that address. Such
17066 addresses also ought to have their bottom bit set (assuming that
17067 they reside in Thumb code), but at the moment they will not. */
17068
17069 bfd_boolean
17070 arm_fix_adjustable (fixS * fixP)
17071 {
17072 if (fixP->fx_addsy == NULL)
17073 return 1;
17074
17075 if (THUMB_IS_FUNC (fixP->fx_addsy)
17076 && fixP->fx_subsy == NULL)
17077 return 0;
17078
17079 /* We need the symbol name for the VTABLE entries. */
17080 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
17081 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17082 return 0;
17083
17084 /* Don't allow symbols to be discarded on GOT related relocs. */
17085 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
17086 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
17087 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
17088 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
17089 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
17090 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
17091 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
17092 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
17093 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
17094 return 0;
17095
17096 return 1;
17097 }
17098
17099 const char *
17100 elf32_arm_target_format (void)
17101 {
17102 #ifdef TE_SYMBIAN
17103 return (target_big_endian
17104 ? "elf32-bigarm-symbian"
17105 : "elf32-littlearm-symbian");
17106 #elif defined (TE_VXWORKS)
17107 return (target_big_endian
17108 ? "elf32-bigarm-vxworks"
17109 : "elf32-littlearm-vxworks");
17110 #else
17111 if (target_big_endian)
17112 return "elf32-bigarm";
17113 else
17114 return "elf32-littlearm";
17115 #endif
17116 }
17117
17118 void
17119 armelf_frob_symbol (symbolS * symp,
17120 int * puntp)
17121 {
17122 elf_frob_symbol (symp, puntp);
17123 }
17124 #endif
17125
17126 /* MD interface: Finalization. */
17127
17128 /* This is a good place to dump the literal pools, although it was
17129 probably not intended for this kind of use.  We need to dump them
17130 before references are made to a null symbol pointer.  */
17131
17132 void
17133 arm_cleanup (void)
17134 {
17135 literal_pool * pool;
17136
17137 for (pool = list_of_pools; pool; pool = pool->next)
17138 {
17139 /* Put it at the end of the relevant section. */
17140 subseg_set (pool->section, pool->sub_section);
17141 #ifdef OBJ_ELF
17142 arm_elf_change_section ();
17143 #endif
17144 s_ltorg (0);
17145 }
17146 }
17147
17148 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17149 ARM ones. */
17150
17151 void
17152 arm_adjust_symtab (void)
17153 {
17154 #ifdef OBJ_COFF
17155 symbolS * sym;
17156
17157 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17158 {
17159 if (ARM_IS_THUMB (sym))
17160 {
17161 if (THUMB_IS_FUNC (sym))
17162 {
17163 /* Mark the symbol as a Thumb function. */
17164 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17165 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17166 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17167
17168 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17169 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17170 else
17171 as_bad (_("%s: unexpected function type: %d"),
17172 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17173 }
17174 else switch (S_GET_STORAGE_CLASS (sym))
17175 {
17176 case C_EXT:
17177 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17178 break;
17179 case C_STAT:
17180 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17181 break;
17182 case C_LABEL:
17183 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17184 break;
17185 default:
17186 /* Do nothing. */
17187 break;
17188 }
17189 }
17190
17191 if (ARM_IS_INTERWORK (sym))
17192 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17193 }
17194 #endif
17195 #ifdef OBJ_ELF
17196 symbolS * sym;
17197 char bind;
17198
17199 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17200 {
17201 if (ARM_IS_THUMB (sym))
17202 {
17203 elf_symbol_type * elf_sym;
17204
17205 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17206 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17207
17208 if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
17209 {
17210 /* If it's a .thumb_func, declare it as such;
17211 otherwise tag the label as .code 16. */
17212 if (THUMB_IS_FUNC (sym))
17213 elf_sym->internal_elf_sym.st_info =
17214 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17215 else
17216 elf_sym->internal_elf_sym.st_info =
17217 ELF_ST_INFO (bind, STT_ARM_16BIT);
17218 }
17219 }
17220 }
17221 #endif
17222 }
17223
17224 /* MD interface: Initialization. */
17225
17226 static void
17227 set_constant_flonums (void)
17228 {
17229 int i;
17230
17231 for (i = 0; i < NUM_FLOAT_VALS; i++)
17232 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17233 abort ();
17234 }
17235
17236 void
17237 md_begin (void)
17238 {
17239 unsigned mach;
17240 unsigned int i;
17241
17242 if ( (arm_ops_hsh = hash_new ()) == NULL
17243 || (arm_cond_hsh = hash_new ()) == NULL
17244 || (arm_shift_hsh = hash_new ()) == NULL
17245 || (arm_psr_hsh = hash_new ()) == NULL
17246 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17247 || (arm_reg_hsh = hash_new ()) == NULL
17248 || (arm_reloc_hsh = hash_new ()) == NULL
17249 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17250 as_fatal (_("virtual memory exhausted"));
17251
17252 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17253 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17254 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17255 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17256 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17257 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17258 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17259 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17260 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17261 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17262 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17263 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17264 for (i = 0;
17265 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17266 i++)
17267 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17268 (PTR) (barrier_opt_names + i));
17269 #ifdef OBJ_ELF
17270 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17271 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17272 #endif
17273
17274 set_constant_flonums ();
17275
17276 /* Set the cpu variant based on the command-line options. We prefer
17277 -mcpu= over -march= if both are set (as for GCC); and we prefer
17278 -mfpu= over any other way of setting the floating point unit.
17279 Use of legacy options together with new-style options is faulted.  */
17280 if (legacy_cpu)
17281 {
17282 if (mcpu_cpu_opt || march_cpu_opt)
17283 as_bad (_("use of old and new-style options to set CPU type"));
17284
17285 mcpu_cpu_opt = legacy_cpu;
17286 }
17287 else if (!mcpu_cpu_opt)
17288 mcpu_cpu_opt = march_cpu_opt;
17289
17290 if (legacy_fpu)
17291 {
17292 if (mfpu_opt)
17293 as_bad (_("use of old and new-style options to set FPU type"));
17294
17295 mfpu_opt = legacy_fpu;
17296 }
17297 else if (!mfpu_opt)
17298 {
17299 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17300 /* Some environments specify a default FPU. If they don't, infer it
17301 from the processor. */
17302 if (mcpu_fpu_opt)
17303 mfpu_opt = mcpu_fpu_opt;
17304 else
17305 mfpu_opt = march_fpu_opt;
17306 #else
17307 mfpu_opt = &fpu_default;
17308 #endif
17309 }
17310
17311 if (!mfpu_opt)
17312 {
17313 if (!mcpu_cpu_opt)
17314 mfpu_opt = &fpu_default;
17315 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17316 mfpu_opt = &fpu_arch_vfp_v2;
17317 else
17318 mfpu_opt = &fpu_arch_fpa;
17319 }
17320
17321 #ifdef CPU_DEFAULT
17322 if (!mcpu_cpu_opt)
17323 {
17324 mcpu_cpu_opt = &cpu_default;
17325 selected_cpu = cpu_default;
17326 }
17327 #else
17328 if (mcpu_cpu_opt)
17329 selected_cpu = *mcpu_cpu_opt;
17330 else
17331 mcpu_cpu_opt = &arm_arch_any;
17332 #endif
17333
17334 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17335
17336 arm_arch_used = thumb_arch_used = arm_arch_none;
17337
17338 #if defined OBJ_COFF || defined OBJ_ELF
17339 {
17340 unsigned int flags = 0;
17341
17342 #if defined OBJ_ELF
17343 flags = meabi_flags;
17344
17345 switch (meabi_flags)
17346 {
17347 case EF_ARM_EABI_UNKNOWN:
17348 #endif
17349 /* Set the flags in the private structure. */
17350 if (uses_apcs_26) flags |= F_APCS26;
17351 if (support_interwork) flags |= F_INTERWORK;
17352 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17353 if (pic_code) flags |= F_PIC;
17354 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17355 flags |= F_SOFT_FLOAT;
17356
17357 switch (mfloat_abi_opt)
17358 {
17359 case ARM_FLOAT_ABI_SOFT:
17360 case ARM_FLOAT_ABI_SOFTFP:
17361 flags |= F_SOFT_FLOAT;
17362 break;
17363
17364 case ARM_FLOAT_ABI_HARD:
17365 if (flags & F_SOFT_FLOAT)
17366 as_bad (_("hard-float conflicts with specified fpu"));
17367 break;
17368 }
17369
17370 /* Using pure-endian doubles (even if soft-float). */
17371 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17372 flags |= F_VFP_FLOAT;
17373
17374 #if defined OBJ_ELF
17375 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17376 flags |= EF_ARM_MAVERICK_FLOAT;
17377 break;
17378
17379 case EF_ARM_EABI_VER4:
17380 case EF_ARM_EABI_VER5:
17381 /* No additional flags to set. */
17382 break;
17383
17384 default:
17385 abort ();
17386 }
17387 #endif
17388 bfd_set_private_flags (stdoutput, flags);
17389
17390 /* We have run out of flags in the COFF header to encode the
17391 status of ATPCS support, so instead we create a dummy,
17392 empty, debug section called .arm.atpcs. */
17393 if (atpcs)
17394 {
17395 asection * sec;
17396
17397 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17398
17399 if (sec != NULL)
17400 {
17401 bfd_set_section_flags
17402 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17403 bfd_set_section_size (stdoutput, sec, 0);
17404 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17405 }
17406 }
17407 }
17408 #endif
17409
17410 /* Record the CPU type as well. */
17411 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17412 mach = bfd_mach_arm_iWMMXt;
17413 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17414 mach = bfd_mach_arm_XScale;
17415 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17416 mach = bfd_mach_arm_ep9312;
17417 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17418 mach = bfd_mach_arm_5TE;
17419 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17420 {
17421 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17422 mach = bfd_mach_arm_5T;
17423 else
17424 mach = bfd_mach_arm_5;
17425 }
17426 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17427 {
17428 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17429 mach = bfd_mach_arm_4T;
17430 else
17431 mach = bfd_mach_arm_4;
17432 }
17433 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17434 mach = bfd_mach_arm_3M;
17435 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17436 mach = bfd_mach_arm_3;
17437 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17438 mach = bfd_mach_arm_2a;
17439 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17440 mach = bfd_mach_arm_2;
17441 else
17442 mach = bfd_mach_arm_unknown;
17443
17444 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17445 }
17446
17447 /* Command line processing. */
17448
17449 /* md_parse_option
17450 Invocation line includes a switch not recognized by the base assembler.
17451 See if it's a processor-specific option.
17452
17453 This routine is somewhat complicated by the need for backwards
17454 compatibility (since older releases of gcc can't be changed).
17455 The new options try to make the interface as compatible as
17456 possible with GCC.
17457
17458 New options (supported) are:
17459
17460 -mcpu=<cpu name> Assemble for selected processor
17461 -march=<architecture name> Assemble for selected architecture
17462 -mfpu=<fpu architecture> Assemble for selected FPU.
17463 -EB/-mbig-endian Big-endian
17464 -EL/-mlittle-endian Little-endian
17465 -k Generate PIC code
17466 -mthumb Start in Thumb mode
17467 -mthumb-interwork Code supports ARM/Thumb interworking
17468
17469 For now we will also provide support for:
17470
17471 -mapcs-32 32-bit Program counter
17472 -mapcs-26 26-bit Program counter
17473 -mapcs-float Floats passed in FP registers
17474 -mapcs-reentrant Reentrant code
17475 -matpcs
17476 (sometime these will probably be replaced with -mapcs=<list of options>
17477 and -matpcs=<list of options>)
17478
17479 The remaining options are only supported for backwards compatibility.
17480 Cpu variants, the arm part is optional:
17481 -m[arm]1 Currently not supported.
17482 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17483 -m[arm]3 Arm 3 processor
17484 -m[arm]6[xx], Arm 6 processors
17485 -m[arm]7[xx][t][[d]m] Arm 7 processors
17486 -m[arm]8[10] Arm 8 processors
17487 -m[arm]9[20][tdmi] Arm 9 processors
17488 -mstrongarm[110[0]] StrongARM processors
17489 -mxscale XScale processors
17490 -m[arm]v[2345[t[e]]] Arm architectures
17491 -mall All (except the ARM1)
17492 FP variants:
17493 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17494 -mfpe-old (No float load/store multiples)
17495 -mvfpxd VFP Single precision
17496 -mvfp All VFP
17497 -mno-fpu Disable all floating point instructions
17498
17499 The following CPU names are recognized:
17500 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17501 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17502 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
17503 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17504 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17505 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
17506 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17507
17508 */
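/* A typical modern invocation might look like (illustrative only):

   as -mcpu=arm926ej-s -mfpu=vfp -mthumb-interwork -o foo.o foo.s  */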
17509
17510 const char * md_shortopts = "m:k";
17511
17512 #ifdef ARM_BI_ENDIAN
17513 #define OPTION_EB (OPTION_MD_BASE + 0)
17514 #define OPTION_EL (OPTION_MD_BASE + 1)
17515 #else
17516 #if TARGET_BYTES_BIG_ENDIAN
17517 #define OPTION_EB (OPTION_MD_BASE + 0)
17518 #else
17519 #define OPTION_EL (OPTION_MD_BASE + 1)
17520 #endif
17521 #endif
17522
17523 struct option md_longopts[] =
17524 {
17525 #ifdef OPTION_EB
17526 {"EB", no_argument, NULL, OPTION_EB},
17527 #endif
17528 #ifdef OPTION_EL
17529 {"EL", no_argument, NULL, OPTION_EL},
17530 #endif
17531 {NULL, no_argument, NULL, 0}
17532 };
17533
17534 size_t md_longopts_size = sizeof (md_longopts);
17535
17536 struct arm_option_table
17537 {
17538 char *option; /* Option name to match. */
17539 char *help; /* Help information. */
17540 int *var; /* Variable to change. */
17541 int value; /* What to change it to. */
17542 char *deprecated; /* If non-null, print this message. */
17543 };
17544
17545 struct arm_option_table arm_opts[] =
17546 {
17547 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17548 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17549 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17550 &support_interwork, 1, NULL},
17551 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17552 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17553 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17554 1, NULL},
17555 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17556 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17557 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17558 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17559 NULL},
17560
17561 /* These are recognized by the assembler, but have no effect on code. */
17562 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17563 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17564 {NULL, NULL, NULL, 0, NULL}
17565 };
17566
17567 struct arm_legacy_option_table
17568 {
17569 char *option; /* Option name to match. */
17570 const arm_feature_set **var; /* Variable to change. */
17571 const arm_feature_set value; /* What to change it to. */
17572 char *deprecated; /* If non-null, print this message. */
17573 };
17574
17575 const struct arm_legacy_option_table arm_legacy_opts[] =
17576 {
17577 /* DON'T add any new processors to this list -- we want the whole list
17578 to go away... Add them to the processors table instead. */
17579 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17580 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17581 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17582 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17583 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17584 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17585 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17586 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17587 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17588 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17589 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17590 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17591 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17592 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17593 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17594 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17595 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17596 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17597 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17598 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17599 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17600 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17601 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17602 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17603 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17604 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17605 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17606 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17607 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17608 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17609 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17610 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17611 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17612 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17613 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17614 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17615 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17616 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17617 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17618 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17619 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17620 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17621 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17622 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17623 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17624 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17625 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17626 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17627 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17628 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17629 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17630 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17631 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17632 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17633 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17634 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17635 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17636 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17637 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17638 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17639 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17640 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17641 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17642 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17643 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17644 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17645 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17646 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17647 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17648 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17649 N_("use -mcpu=strongarm110")},
17650 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17651 N_("use -mcpu=strongarm1100")},
17652 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17653 N_("use -mcpu=strongarm1110")},
17654 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17655 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17656 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17657
17658 /* Architecture variants -- don't add any more to this list either. */
17659 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17660 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17661 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17662 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17663 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17664 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17665 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17666 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17667 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17668 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17669 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17670 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17671 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17672 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17673 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17674 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17675 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17676 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17677
17678 /* Floating point variants -- don't add any more to this list either. */
17679 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17680 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17681 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17682 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17683 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17684
17685 {NULL, NULL, ARM_ARCH_NONE, NULL}
17686 };
17687
17688 struct arm_cpu_option_table
17689 {
17690 char *name;
17691 const arm_feature_set value;
17692 /* For some CPUs we assume an FPU unless the user explicitly sets
17693 -mfpu=... */
17694 const arm_feature_set default_fpu;
17695 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17696 case. */
17697 const char *canonical_name;
17698 };
17699
17700 /* This list should, at a minimum, contain all the cpu names
17701 recognized by GCC. */
17702 static const struct arm_cpu_option_table arm_cpus[] =
17703 {
17704 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17705 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17706 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17707 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17708 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17709 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17710 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17711 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17712 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17713 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17714 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17715 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17716 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17717 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17718 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17719 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17720 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17721 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17722 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17723 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17724 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17725 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17726 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17727 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17728 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17729 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17730 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17731 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17732 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17733 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17734 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17735 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17736 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17737 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17738 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17739 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17740 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17741 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17742 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17743 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17744 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17745 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17746 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17747 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17748 /* For V5 or later processors we default to using VFP; but the user
17749 should really set the FPU type explicitly. */
17750 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17751 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17752 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17753 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17754 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17755 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17756 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17757 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17758 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17759 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17760 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17761 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17762 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17763 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17764 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17765 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17766 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17767 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17768 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17769 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17770 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17771 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17772 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17773 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17774 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17775 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17776 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17777 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17778 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17779 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17780 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17781 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17782 | FPU_NEON_EXT_V1),
17783 NULL},
17784 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17785 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17786 /* ??? XSCALE is really an architecture. */
17787 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17788 /* ??? iwmmxt is not a processor. */
17789 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17790 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17791 /* Maverick */
17792 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17793 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17794 };
17795
17796 struct arm_arch_option_table
17797 {
17798 char *name;
17799 const arm_feature_set value;
17800 const arm_feature_set default_fpu;
17801 };
17802
17803 /* This list should, at a minimum, contain all the architecture names
17804 recognized by GCC. */
17805 static const struct arm_arch_option_table arm_archs[] =
17806 {
17807 {"all", ARM_ANY, FPU_ARCH_FPA},
17808 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17809 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17810 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17811 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17812 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17813 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17814 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17815 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17816 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17817 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17818 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17819 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17820 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17821 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17822 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17823 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17824 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17825 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17826 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17827 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17828 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17829 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17830 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17831 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17832 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17833 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17834 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17835 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17836 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17837 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17838 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17839 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17840 };
17841
17842 /* ISA extensions in the co-processor space. */
17843 struct arm_option_cpu_value_table
17844 {
17845 char *name;
17846 const arm_feature_set value;
17847 };
17848
17849 static const struct arm_option_cpu_value_table arm_extensions[] =
17850 {
17851 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
17852 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
17853 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
17854 {NULL, ARM_ARCH_NONE}
17855 };
17856
17857 /* This list should, at a minimum, contain all the fpu names
17858 recognized by GCC. */
17859 static const struct arm_option_cpu_value_table arm_fpus[] =
17860 {
17861 {"softfpa", FPU_NONE},
17862 {"fpe", FPU_ARCH_FPE},
17863 {"fpe2", FPU_ARCH_FPE},
17864 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
17865 {"fpa", FPU_ARCH_FPA},
17866 {"fpa10", FPU_ARCH_FPA},
17867 {"fpa11", FPU_ARCH_FPA},
17868 {"arm7500fe", FPU_ARCH_FPA},
17869 {"softvfp", FPU_ARCH_VFP},
17870 {"softvfp+vfp", FPU_ARCH_VFP_V2},
17871 {"vfp", FPU_ARCH_VFP_V2},
17872 {"vfp9", FPU_ARCH_VFP_V2},
17873 {"vfp3", FPU_ARCH_VFP_V3},
17874 {"vfp10", FPU_ARCH_VFP_V2},
17875 {"vfp10-r0", FPU_ARCH_VFP_V1},
17876 {"vfpxd", FPU_ARCH_VFP_V1xD},
17877 {"arm1020t", FPU_ARCH_VFP_V1},
17878 {"arm1020e", FPU_ARCH_VFP_V2},
17879 {"arm1136jfs", FPU_ARCH_VFP_V2},
17880 {"arm1136jf-s", FPU_ARCH_VFP_V2},
17881 {"maverick", FPU_ARCH_MAVERICK},
17882 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
17883 {NULL, ARM_ARCH_NONE}
17884 };
17885
17886 struct arm_option_value_table
17887 {
17888 char *name;
17889 long value;
17890 };
17891
17892 static const struct arm_option_value_table arm_float_abis[] =
17893 {
17894 {"hard", ARM_FLOAT_ABI_HARD},
17895 {"softfp", ARM_FLOAT_ABI_SOFTFP},
17896 {"soft", ARM_FLOAT_ABI_SOFT},
17897 {NULL, 0}
17898 };
17899
17900 #ifdef OBJ_ELF
17901 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17902 static const struct arm_option_value_table arm_eabis[] =
17903 {
17904 {"gnu", EF_ARM_EABI_UNKNOWN},
17905 {"4", EF_ARM_EABI_VER4},
17906 {"5", EF_ARM_EABI_VER5},
17907 {NULL, 0}
17908 };
17909 #endif
17910
17911 struct arm_long_option_table
17912 {
17913 char * option; /* Substring to match. */
17914 char * help; /* Help information. */
17915 int (* func) (char * subopt); /* Function to decode sub-option. */
17916 char * deprecated; /* If non-null, print this message. */
17917 };
17918
17919 static int
17920 arm_parse_extension (char * str, const arm_feature_set **opt_p)
17921 {
17922 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
17923
17924 /* Copy the feature set, so that we can modify it. */
17925 *ext_set = **opt_p;
17926 *opt_p = ext_set;
17927
17928 while (str != NULL && *str != 0)
17929 {
17930 const struct arm_option_cpu_value_table * opt;
17931 char * ext;
17932 int optlen;
17933
17934 if (*str != '+')
17935 {
17936 as_bad (_("invalid architectural extension"));
17937 return 0;
17938 }
17939
17940 str++;
17941 ext = strchr (str, '+');
17942
17943 if (ext != NULL)
17944 optlen = ext - str;
17945 else
17946 optlen = strlen (str);
17947
17948 if (optlen == 0)
17949 {
17950 as_bad (_("missing architectural extension"));
17951 return 0;
17952 }
17953
17954 for (opt = arm_extensions; opt->name != NULL; opt++)
17955 if (strncmp (opt->name, str, optlen) == 0)
17956 {
17957 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
17958 break;
17959 }
17960
17961 if (opt->name == NULL)
17962 {
17963 	  as_bad (_("unknown architectural extension `%s'"), str);
17964 return 0;
17965 }
17966
17967 str = ext;
17968     }
17969
17970 return 1;
17971 }
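/* Illustrative note (not part of the original code): given
   "-mcpu=xscale+iwmmxt", arm_parse_cpu below passes the suffix
   "+iwmmxt" to arm_parse_extension, which copies the XScale feature
   set and ORs in ARM_CEXT_IWMMXT from the arm_extensions table.
   Extensions may be chained, e.g. "+maverick+xscale"; an empty or
   unrecognized name between '+' signs is reported with as_bad and
   parsing stops.  */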
17972
17973 static int
17974 arm_parse_cpu (char * str)
17975 {
17976 const struct arm_cpu_option_table * opt;
17977 char * ext = strchr (str, '+');
17978 int optlen;
17979
17980 if (ext != NULL)
17981 optlen = ext - str;
17982 else
17983 optlen = strlen (str);
17984
17985 if (optlen == 0)
17986 {
17987 as_bad (_("missing cpu name `%s'"), str);
17988 return 0;
17989 }
17990
17991 for (opt = arm_cpus; opt->name != NULL; opt++)
17992 if (strncmp (opt->name, str, optlen) == 0)
17993 {
17994 mcpu_cpu_opt = &opt->value;
17995 mcpu_fpu_opt = &opt->default_fpu;
17996 if (opt->canonical_name)
17997 strcpy(selected_cpu_name, opt->canonical_name);
17998 else
17999 {
18000 int i;
18001 for (i = 0; i < optlen; i++)
18002 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18003 selected_cpu_name[i] = 0;
18004 }
18005
18006 if (ext != NULL)
18007 return arm_parse_extension (ext, &mcpu_cpu_opt);
18008
18009 return 1;
18010 }
18011
18012 as_bad (_("unknown cpu `%s'"), str);
18013 return 0;
18014 }
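/* Example (informal): "-mcpu=arm920" matches the arm_cpus entry whose
   canonical name is "ARM920T", so that string becomes
   selected_cpu_name; "-mcpu=arm7tdmi" has no canonical name, so the
   option text itself is upper-cased, giving "ARM7TDMI".  Any "+ext"
   suffix is then handed to arm_parse_extension above.  */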
18015
18016 static int
18017 arm_parse_arch (char * str)
18018 {
18019 const struct arm_arch_option_table *opt;
18020 char *ext = strchr (str, '+');
18021 int optlen;
18022
18023 if (ext != NULL)
18024 optlen = ext - str;
18025 else
18026 optlen = strlen (str);
18027
18028 if (optlen == 0)
18029 {
18030 as_bad (_("missing architecture name `%s'"), str);
18031 return 0;
18032 }
18033
18034 for (opt = arm_archs; opt->name != NULL; opt++)
18035 if (streq (opt->name, str))
18036 {
18037 march_cpu_opt = &opt->value;
18038 march_fpu_opt = &opt->default_fpu;
18039 strcpy(selected_cpu_name, opt->name);
18040
18041 if (ext != NULL)
18042 return arm_parse_extension (ext, &march_cpu_opt);
18043
18044 return 1;
18045 }
18046
18047 as_bad (_("unknown architecture `%s'\n"), str);
18048 return 0;
18049 }
18050
18051 static int
18052 arm_parse_fpu (char * str)
18053 {
18054 const struct arm_option_cpu_value_table * opt;
18055
18056 for (opt = arm_fpus; opt->name != NULL; opt++)
18057 if (streq (opt->name, str))
18058 {
18059 mfpu_opt = &opt->value;
18060 return 1;
18061 }
18062
18063 as_bad (_("unknown floating point format `%s'\n"), str);
18064 return 0;
18065 }
18066
18067 static int
18068 arm_parse_float_abi (char * str)
18069 {
18070 const struct arm_option_value_table * opt;
18071
18072 for (opt = arm_float_abis; opt->name != NULL; opt++)
18073 if (streq (opt->name, str))
18074 {
18075 mfloat_abi_opt = opt->value;
18076 return 1;
18077 }
18078
18079 as_bad (_("unknown floating point abi `%s'\n"), str);
18080 return 0;
18081 }
18082
18083 #ifdef OBJ_ELF
18084 static int
18085 arm_parse_eabi (char * str)
18086 {
18087 const struct arm_option_value_table *opt;
18088
18089 for (opt = arm_eabis; opt->name != NULL; opt++)
18090 if (streq (opt->name, str))
18091 {
18092 meabi_flags = opt->value;
18093 return 1;
18094 }
18095 as_bad (_("unknown EABI `%s'\n"), str);
18096 return 0;
18097 }
18098 #endif
18099
18100 struct arm_long_option_table arm_long_opts[] =
18101 {
18102 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18103 arm_parse_cpu, NULL},
18104 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18105 arm_parse_arch, NULL},
18106 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18107 arm_parse_fpu, NULL},
18108 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18109 arm_parse_float_abi, NULL},
18110 #ifdef OBJ_ELF
18111 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18112 arm_parse_eabi, NULL},
18113 #endif
18114 {NULL, NULL, 0, NULL}
18115 };
18116
18117 int
18118 md_parse_option (int c, char * arg)
18119 {
18120 struct arm_option_table *opt;
18121 const struct arm_legacy_option_table *fopt;
18122 struct arm_long_option_table *lopt;
18123
18124 switch (c)
18125 {
18126 #ifdef OPTION_EB
18127 case OPTION_EB:
18128 target_big_endian = 1;
18129 break;
18130 #endif
18131
18132 #ifdef OPTION_EL
18133 case OPTION_EL:
18134 target_big_endian = 0;
18135 break;
18136 #endif
18137
18138 case 'a':
18139       /* Listing option.  Just ignore these; we don't support any additional
18140 	 ones.  */
18141 return 0;
18142
18143 default:
18144 for (opt = arm_opts; opt->option != NULL; opt++)
18145 {
18146 if (c == opt->option[0]
18147 && ((arg == NULL && opt->option[1] == 0)
18148 || streq (arg, opt->option + 1)))
18149 {
18150 #if WARN_DEPRECATED
18151 /* If the option is deprecated, tell the user. */
18152 if (opt->deprecated != NULL)
18153 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18154 arg ? arg : "", _(opt->deprecated));
18155 #endif
18156
18157 if (opt->var != NULL)
18158 *opt->var = opt->value;
18159
18160 return 1;
18161 }
18162 }
18163
18164 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18165 {
18166 if (c == fopt->option[0]
18167 && ((arg == NULL && fopt->option[1] == 0)
18168 || streq (arg, fopt->option + 1)))
18169 {
18170 #if WARN_DEPRECATED
18171 /* If the option is deprecated, tell the user. */
18172 if (fopt->deprecated != NULL)
18173 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18174 arg ? arg : "", _(fopt->deprecated));
18175 #endif
18176
18177 if (fopt->var != NULL)
18178 *fopt->var = &fopt->value;
18179
18180 return 1;
18181 }
18182 }
18183
18184 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18185 {
18186 /* These options are expected to have an argument. */
18187 if (c == lopt->option[0]
18188 && arg != NULL
18189 && strncmp (arg, lopt->option + 1,
18190 strlen (lopt->option + 1)) == 0)
18191 {
18192 #if WARN_DEPRECATED
18193 /* If the option is deprecated, tell the user. */
18194 if (lopt->deprecated != NULL)
18195 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18196 _(lopt->deprecated));
18197 #endif
18198
18199 	      /* Call the sub-option parser. */
18200 return lopt->func (arg + strlen (lopt->option) - 1);
18201 }
18202 }
18203
18204 return 0;
18205 }
18206
18207 return 1;
18208 }
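/* Worked example of the long-option dispatch above (illustrative
   only): for "-mfpu=vfp" the option machinery delivers c == 'm' and
   arg == "fpu=vfp".  The "mfpu=" entry matches because
   strncmp (arg, "fpu=", 4) == 0, and the sub-option parser is invoked
   as lopt->func (arg + strlen ("mfpu=") - 1), i.e. arm_parse_fpu is
   called with "vfp".  */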
18209
18210 void
18211 md_show_usage (FILE * fp)
18212 {
18213 struct arm_option_table *opt;
18214 struct arm_long_option_table *lopt;
18215
18216 fprintf (fp, _(" ARM-specific assembler options:\n"));
18217
18218 for (opt = arm_opts; opt->option != NULL; opt++)
18219 if (opt->help != NULL)
18220 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18221
18222 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18223 if (lopt->help != NULL)
18224 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18225
18226 #ifdef OPTION_EB
18227 fprintf (fp, _("\
18228 -EB assemble code for a big-endian cpu\n"));
18229 #endif
18230
18231 #ifdef OPTION_EL
18232 fprintf (fp, _("\
18233 -EL assemble code for a little-endian cpu\n"));
18234 #endif
18235 }
18236
18237
18238 #ifdef OBJ_ELF
18239 typedef struct
18240 {
18241 int val;
18242 arm_feature_set flags;
18243 } cpu_arch_ver_table;
18244
18245 /* Mapping from CPU features to EABI CPU arch values.  The table must be
18246    sorted with the least-featured architectures first.  */
18247 static const cpu_arch_ver_table cpu_arch_ver[] =
18248 {
18249 {1, ARM_ARCH_V4},
18250 {2, ARM_ARCH_V4T},
18251 {3, ARM_ARCH_V5},
18252 {4, ARM_ARCH_V5TE},
18253 {5, ARM_ARCH_V5TEJ},
18254 {6, ARM_ARCH_V6},
18255 {7, ARM_ARCH_V6Z},
18256 {8, ARM_ARCH_V6K},
18257 {9, ARM_ARCH_V6T2},
18258 {10, ARM_ARCH_V7A},
18259 {10, ARM_ARCH_V7R},
18260 {10, ARM_ARCH_V7M},
18261 {0, ARM_ARCH_NONE}
18262 };
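/* Informal sketch of how the loop in aeabi_set_public_attributes
   walks this table: ARM_CPU_HAS_FEATURE tests for any overlapping
   feature bit, so for a v6K feature set every row from v4 up to v6K
   still overlaps the bits remaining in `tmp', updates `arch' and
   clears those bits; the v6T2 and v7 rows then no longer overlap,
   leaving arch == 8 (the EABI value for v6K).  This is why the table
   must stay sorted with the least-featured architectures first.  */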
18263
18264 /* Set the public EABI object attributes. */
18265 static void
18266 aeabi_set_public_attributes (void)
18267 {
18268 int arch;
18269 arm_feature_set flags;
18270 arm_feature_set tmp;
18271 const cpu_arch_ver_table *p;
18272
18273 /* Choose the architecture based on the capabilities of the requested cpu
18274 (if any) and/or the instructions actually used. */
18275 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18276 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18277 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18278
18279 tmp = flags;
18280 arch = 0;
18281 for (p = cpu_arch_ver; p->val; p++)
18282 {
18283 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18284 {
18285 arch = p->val;
18286 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18287 }
18288 }
18289
18290 /* Tag_CPU_name. */
18291 if (selected_cpu_name[0])
18292 {
18293 char *p;
18294
18295 p = selected_cpu_name;
18296 if (strncmp(p, "armv", 4) == 0)
18297 {
18298 int i;
18299
18300 p += 4;
18301 for (i = 0; p[i]; i++)
18302 p[i] = TOUPPER (p[i]);
18303 }
18304 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18305 }
18306 /* Tag_CPU_arch. */
18307 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18308 /* Tag_CPU_arch_profile. */
18309 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18310 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18311 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18312 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18313 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18314 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18315 /* Tag_ARM_ISA_use. */
18316 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18317 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18318 /* Tag_THUMB_ISA_use. */
18319 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18320 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18321 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18322 /* Tag_VFP_arch. */
18323 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18324 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18325 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18326 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18327 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18328 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18329 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18330 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18331 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18332 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18333 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18334 /* Tag_WMMX_arch. */
18335 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18336 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18337 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18338 /* Tag_NEON_arch. */
18339 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18340 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18341 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18342 }
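/* Hedged example of the attributes this can produce (exact values
   depend on the instructions actually assembled): with
   "-march=armv5te", selected_cpu_name is "armv5te", so Tag_CPU_name
   is written as "5TE" (the "armv" prefix is stripped and the rest
   upper-cased), Tag_CPU_arch is 4 (v5TE), and Tag_VFP_arch 2 is only
   emitted if VFP v2 instructions were actually used.  */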
18343
18344 /* Add the .ARM.attributes section. */
18345 void
18346 arm_md_end (void)
18347 {
18348 segT s;
18349 char *p;
18350 addressT addr;
18351 offsetT size;
18352
18353 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18354 return;
18355
18356 aeabi_set_public_attributes ();
18357 size = elf32_arm_eabi_attr_size (stdoutput);
18358 s = subseg_new (".ARM.attributes", 0);
18359 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18360 addr = frag_now_fix ();
18361 p = frag_more (size);
18362 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18363 }
18364 #endif /* OBJ_ELF */
18365
18366
18367 /* Parse a .cpu directive. */
18368
18369 static void
18370 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18371 {
18372 const struct arm_cpu_option_table *opt;
18373 char *name;
18374 char saved_char;
18375
18376 name = input_line_pointer;
18377 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18378 input_line_pointer++;
18379 saved_char = *input_line_pointer;
18380 *input_line_pointer = 0;
18381
18382 /* Skip the first "all" entry. */
18383 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18384 if (streq (opt->name, name))
18385 {
18386 mcpu_cpu_opt = &opt->value;
18387 selected_cpu = opt->value;
18388 if (opt->canonical_name)
18389 strcpy(selected_cpu_name, opt->canonical_name);
18390 else
18391 {
18392 int i;
18393 for (i = 0; opt->name[i]; i++)
18394 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18395 selected_cpu_name[i] = 0;
18396 }
18397 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18398 *input_line_pointer = saved_char;
18399 demand_empty_rest_of_line ();
18400 return;
18401 }
18402 as_bad (_("unknown cpu `%s'"), name);
18403 *input_line_pointer = saved_char;
18404 ignore_rest_of_line ();
18405 }
18406
18407
18408 /* Parse a .arch directive. */
18409
18410 static void
18411 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18412 {
18413 const struct arm_arch_option_table *opt;
18414 char saved_char;
18415 char *name;
18416
18417 name = input_line_pointer;
18418 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18419 input_line_pointer++;
18420 saved_char = *input_line_pointer;
18421 *input_line_pointer = 0;
18422
18423 /* Skip the first "all" entry. */
18424 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18425 if (streq (opt->name, name))
18426 {
18427 mcpu_cpu_opt = &opt->value;
18428 selected_cpu = opt->value;
18429 strcpy(selected_cpu_name, opt->name);
18430 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18431 *input_line_pointer = saved_char;
18432 demand_empty_rest_of_line ();
18433 return;
18434 }
18435
18436 as_bad (_("unknown architecture `%s'\n"), name);
18437 *input_line_pointer = saved_char;
18438 ignore_rest_of_line ();
18439 }
18440
18441
18442 /* Parse a .fpu directive. */
18443
18444 static void
18445 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18446 {
18447 const struct arm_option_cpu_value_table *opt;
18448 char saved_char;
18449 char *name;
18450
18451 name = input_line_pointer;
18452 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18453 input_line_pointer++;
18454 saved_char = *input_line_pointer;
18455 *input_line_pointer = 0;
18456
18457 for (opt = arm_fpus; opt->name != NULL; opt++)
18458 if (streq (opt->name, name))
18459 {
18460 mfpu_opt = &opt->value;
18461 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18462 *input_line_pointer = saved_char;
18463 demand_empty_rest_of_line ();
18464 return;
18465 }
18466
18467 as_bad (_("unknown floating point format `%s'\n"), name);
18468 *input_line_pointer = saved_char;
18469 ignore_rest_of_line ();
18470 }
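/* Illustrative use of the directives handled above:

       .cpu arm920t        @ picks the arm920t entry from arm_cpus
       .arch armv5te       @ picks the armv5te entry from arm_archs
       .fpu vfp            @ picks FPU_ARCH_VFP_V2 from arm_fpus

   Each directive re-derives cpu_variant by merging the newly selected
   CPU or architecture feature set with the currently selected FPU.  */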
18471