* gas/config/tc-arm.c (neon_is_quarter_float): Move, and rename to...
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <string.h>
29 #include <limits.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33
34 /* Need TARGET_CPU. */
35 #include "config.h"
36 #include "subsegs.h"
37 #include "obstack.h"
38 #include "symbols.h"
39 #include "listing.h"
40
41 #include "opcode/arm.h"
42
43 #ifdef OBJ_ELF
44 #include "elf/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
47 #endif
48
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
51
52 #ifdef OBJ_ELF
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
55
56 /* This structure holds the unwinding state. */
57
58 static struct
59 {
60 symbolS * proc_start;
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
65 segT saved_seg;
66 subsegT saved_subseg;
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
69 int opcode_count;
70 int opcode_alloc;
71 /* The number of bytes pushed to the stack. */
72 offsetT frame_size;
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
79 offsetT fp_offset;
80 int fp_reg;
81 /* Nonzero if an unwind_setfp directive has been seen. */
82 unsigned fp_used:1;
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
85 } unwind;
86
87 /* Bit N indicates that an R_ARM_NONE relocation has been output for
88 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
89 emitted only once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency = 0;
91
92 #endif /* OBJ_ELF */
93
94 enum arm_float_abi
95 {
96 ARM_FLOAT_ABI_HARD,
97 ARM_FLOAT_ABI_SOFTFP,
98 ARM_FLOAT_ABI_SOFT
99 };
100
101 /* Types of processor to assemble for. */
102 #ifndef CPU_DEFAULT
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
105 #else
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
108 #endif
109 #endif
110 #endif
111
112 #ifndef FPU_DEFAULT
113 # ifdef TE_LINUX
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
116 # ifdef OBJ_ELF
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
118 # else
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
121 # endif
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
124 # else
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
127 # endif
128 #endif /* ifndef FPU_DEFAULT */
129
130 #define streq(a, b) (strcmp (a, b) == 0)
131
132 static arm_feature_set cpu_variant;
133 static arm_feature_set arm_arch_used;
134 static arm_feature_set thumb_arch_used;
135
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26 = FALSE;
138 static int atpcs = FALSE;
139 static int support_interwork = FALSE;
140 static int uses_apcs_float = FALSE;
141 static int pic_code = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default = FPU_DEFAULT;
157 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
165
166 #ifdef CPU_DEFAULT
167 static const arm_feature_set cpu_default = CPU_DEFAULT;
168 #endif
169
170 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178 static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
194
195 static const arm_feature_set arm_arch_any = ARM_ANY;
196 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
199
200 static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202 static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204 static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208 static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
216
217 static int mfloat_abi_opt = -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name[16];
222 #ifdef OBJ_ELF
223 # ifdef EABI_DEFAULT
224 static int meabi_flags = EABI_DEFAULT;
225 # else
226 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
227 # endif
228 #endif
229
230 #ifdef OBJ_ELF
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS * GOT_symbol;
233 #endif
234
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
238 instructions. */
239 static int thumb_mode = 0;
240
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
243
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
248 there.)
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
251 machine code.
252
253 Important differences from the old Thumb mode:
254
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
263
264 static bfd_boolean unified_syntax = FALSE;
265
266 enum neon_el_type
267 {
268 NT_invtype,
269 NT_untyped,
270 NT_integer,
271 NT_float,
272 NT_poly,
273 NT_signed,
274 NT_unsigned
275 };
276
277 struct neon_type_el
278 {
279 enum neon_el_type type;
280 unsigned size;
281 };
282
283 #define NEON_MAX_TYPE_ELS 4
284
285 struct neon_type
286 {
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
288 unsigned elems;
289 };
290
291 struct arm_it
292 {
293 const char * error;
294 unsigned long instruction;
295 int size;
296 int size_req;
297 int cond;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
301 unsigned long relax;
302 struct
303 {
304 bfd_reloc_code_real_type type;
305 expressionS exp;
306 int pc_rel;
307 } reloc;
308
309 struct
310 {
311 unsigned reg;
312 signed int imm;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
330 } operands[6];
331 };
332
333 static struct arm_it inst;
334
335 #define NUM_FLOAT_VALS 8
336
337 const char * fp_const[] =
338 {
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
340 };
341
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
344
345 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
346
347 #define FAIL (-1)
348 #define SUCCESS (0)
349
350 #define SUFF_S 1
351 #define SUFF_D 2
352 #define SUFF_E 3
353 #define SUFF_P 4
354
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
357
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
360
361 #define DOUBLE_LOAD_FLAG 0x00000001
362
363 struct asm_cond
364 {
365 const char * template;
366 unsigned long value;
367 };
368
369 #define COND_ALWAYS 0xE
370
371 struct asm_psr
372 {
373 const char *template;
374 unsigned long field;
375 };
376
377 struct asm_barrier_opt
378 {
379 const char *template;
380 unsigned long value;
381 };
382
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
385
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
391
392 struct reloc_entry
393 {
394 char *name;
395 bfd_reloc_code_real_type reloc;
396 };
397
398 enum vfp_reg_pos
399 {
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
402 };
403
404 enum vfp_ldstm_type
405 {
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
407 };
408
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
412
413 struct neon_typed_alias
414 {
415 unsigned char defined;
416 unsigned char index;
417 struct neon_type_el eltype;
418 };
419
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
422 enum arm_reg_type
423 {
424 REG_TYPE_RN,
425 REG_TYPE_CP,
426 REG_TYPE_CN,
427 REG_TYPE_FN,
428 REG_TYPE_VFS,
429 REG_TYPE_VFD,
430 REG_TYPE_NQ,
431 REG_TYPE_NDQ,
432 REG_TYPE_VFC,
433 REG_TYPE_MVF,
434 REG_TYPE_MVD,
435 REG_TYPE_MVFX,
436 REG_TYPE_MVDX,
437 REG_TYPE_MVAX,
438 REG_TYPE_DSPSC,
439 REG_TYPE_MMXWR,
440 REG_TYPE_MMXWC,
441 REG_TYPE_MMXWCG,
442 REG_TYPE_XSCALE,
443 };
444
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
449 struct reg_entry
450 {
451 const char *name;
452 unsigned char number;
453 unsigned char type;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
456 };
457
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs[] =
460 {
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
480 };
481
482 /* Some well known registers that we refer to directly elsewhere. */
483 #define REG_SP 13
484 #define REG_LR 14
485 #define REG_PC 15
486
487 /* ARM instructions take 4bytes in the object file, Thumb instructions
488 take 2: */
489 #define INSN_SIZE 4
490
491 struct asm_opcode
492 {
493 /* Basic string to match. */
494 const char *template;
495
496 /* Parameters to instruction. */
497 unsigned char operands[8];
498
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
501
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
504
505 /* Thumb-format instruction code. */
506 unsigned int tvalue;
507
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
511
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
514
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
517 };
518
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
528
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
532
533 #define DATA_OP_SHIFT 21
534
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
537
538 /* Codes to distinguish the arithmetic instructions. */
539 #define OPCODE_AND 0
540 #define OPCODE_EOR 1
541 #define OPCODE_SUB 2
542 #define OPCODE_RSB 3
543 #define OPCODE_ADD 4
544 #define OPCODE_ADC 5
545 #define OPCODE_SBC 6
546 #define OPCODE_RSC 7
547 #define OPCODE_TST 8
548 #define OPCODE_TEQ 9
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
555
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
566
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
572
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
584
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
592
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
598
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
614
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
617
618 #define T_OPCODE_BRANCH 0xe000
619
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
624
/* Shared diagnostic messages for the ARM and Thumb encoders.  These
   macros expand to expressions (typically assigned to inst.error), so
   none of them may carry a trailing semicolon.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Stray trailing semicolon removed: with it, any use of BAD_ADDR_MODE
   inside a larger expression (e.g. a conditional or comma expression)
   would fail to compile.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
634
635 static struct hash_control *arm_ops_hsh;
636 static struct hash_control *arm_cond_hsh;
637 static struct hash_control *arm_shift_hsh;
638 static struct hash_control *arm_psr_hsh;
639 static struct hash_control *arm_v7m_psr_hsh;
640 static struct hash_control *arm_reg_hsh;
641 static struct hash_control *arm_reloc_hsh;
642 static struct hash_control *arm_barrier_opt_hsh;
643
644 /* Stuff needed to resolve the label ambiguity
645 As:
646 ...
647 label: <insn>
648 may differ from:
649 ...
650 label:
651 <insn>
652 */
653
654 symbolS * last_label_seen;
655 static int label_is_thumb_function_name = FALSE;
656 \f
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
659
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
662 {
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
665 unsigned int id;
666 symbolS * symbol;
667 segT section;
668 subsegT sub_section;
669 struct literal_pool * next;
670 } literal_pool;
671
672 /* Pointer to a linked list of literal pools. */
673 literal_pool * list_of_pools = NULL;
674
675 /* State variables for IT block handling. */
676 static bfd_boolean current_it_mask = 0;
677 static int current_cc;
678
679 \f
680 /* Pure syntax. */
681
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars[] = "@";
685
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars[] = "#";
694
695 const char line_separator_chars[] = ";";
696
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS[] = "eE";
700
701 /* Chars that mean this number is a floating point constant. */
702 /* As in 0f12.456 */
703 /* or 0d1.2345e12 */
704
705 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
706
707 /* Prefix characters that indicate the start of an immediate
708 value. */
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
710
711 /* Separator character handling. */
712
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
714
715 static inline int
716 skip_past_char (char ** str, char c)
717 {
718 if (**str == c)
719 {
720 (*str)++;
721 return SUCCESS;
722 }
723 else
724 return FAIL;
725 }
726 #define skip_past_comma(str) skip_past_char (str, ',')
727
728 /* Arithmetic expressions (possibly involving symbols). */
729
730 /* Return TRUE if anything in the expression is a bignum. */
731
732 static int
733 walk_no_bignums (symbolS * sp)
734 {
735 if (symbol_get_value_expression (sp)->X_op == O_big)
736 return 1;
737
738 if (symbol_get_value_expression (sp)->X_add_symbol)
739 {
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
743 }
744
745 return 0;
746 }
747
748 static int in_my_get_expression = 0;
749
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
757
/* Parse an expression starting at *STR into EP.  PREFIX_MODE controls
   handling of the '#'/'$' immediate prefix: GE_NO_PREFIX (none allowed),
   GE_IMM_PREFIX (required), GE_OPT_PREFIX / GE_OPT_PREFIX_BIG (optional;
   the _BIG variant additionally permits bignums, for Neon VMOV/VMVN
   64-bit immediates).  On success, advances *STR past the expression and
   returns 0.  On failure, sets inst.error (if not already set) and
   returns nonzero.
   NOTE(review): failure paths return FAIL (-1) in one branch and 1 in
   the others; callers appear to test only for nonzero — confirm.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic parser works on input_line_pointer, so temporarily
     redirect it to our string.  in_my_get_expression tells md_operand
     to flag bad expressions as O_illegal instead of reporting them
     itself, so the error is attributed to the current instruction.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

#ifdef OBJ_AOUT
  /* a.out has no general relocations, so only expressions in these
     well-known sections are representable.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
	          || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: report how far we parsed and restore the parser state.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
840
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
845
846 Note that fp constants aren't represent in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
852
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
854
char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Size of the constant, in littlenums.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Map the FLT_CHARS type letter to a precision (in 2-byte
     littlenums): 2 = single, 4 = double, 6 = extended/packed.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 6;
      break;

    case 'p':
    case 'P':
      prec = 6;
      break;

    default:
      *sizeP = 0;
      return _("bad call to MD_ATOF()");
    }

  /* Parse the decimal constant at input_line_pointer into big-endian
     littlenums; atof_ieee returns the updated scan position.  */
  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * 2;		/* Littlenums are 2 bytes each.  */

  if (target_big_endian)
    {
      /* Big-endian target: emit littlenums most-significant first,
	 exactly as atof_ieee produced them.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], 2);
	  litP += 2;
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FPU format: reverse the littlenum order
	   completely.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], 2);
	    litP += 2;
	  }
      else
	/* Legacy mixed-endian FPA format: big-endian word order,
	   little-endian within each 4-byte word.
	   For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1], 2);
	    md_number_to_chars (litP + 2, (valueT) words[i], 2);
	    litP += 4;
	  }
    }

  return 0;
}
928
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
931 void
932 md_operand (expressionS * expr)
933 {
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
936 }
937
938 /* Immediate values. */
939
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
943 #ifdef OBJ_ELF
944 static int
945 immediate_for_directive (int *val)
946 {
947 expressionS exp;
948 exp.X_op = O_illegal;
949
950 if (is_immediate_prefix (*input_line_pointer))
951 {
952 input_line_pointer++;
953 expression (&exp);
954 }
955
956 if (exp.X_op != O_constant)
957 {
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
960 return FAIL;
961 }
962 *val = exp.X_add_number;
963 return SUCCESS;
964 }
965 #endif
966
967 /* Register parsing. */
968
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
973
974 static struct reg_entry *
975 arm_reg_parse_multi (char **ccp)
976 {
977 char *start = *ccp;
978 char *p;
979 struct reg_entry *reg;
980
981 #ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
983 return NULL;
984 start++;
985 #endif
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
988 start++;
989 #endif
990
991 p = start;
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
993 return NULL;
994
995 do
996 p++;
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
998
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1000
1001 if (!reg)
1002 return NULL;
1003
1004 *ccp = p;
1005 return reg;
1006 }
1007
/* Accept alternative spellings for a register of class TYPE.  REG is the
   entry found by the generic parser (may be NULL); START is where the
   token began, and *CCP is updated past any consumed text.  Returns the
   register number on success, FAIL otherwise.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no break here, so a REG_TYPE_CP request
	 that is not a bare number falls through into the REG_TYPE_MMXWC
	 handling below — confirm whether a break (or an explicit
	 fall-through) was intended.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1045
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1048
1049 static int
1050 arm_reg_parse (char **ccp, enum arm_reg_type type)
1051 {
1052 char *start = *ccp;
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1054 int ret;
1055
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1058 return FAIL;
1059
1060 if (reg && reg->type == type)
1061 return reg->number;
1062
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1064 return ret;
1065
1066 *ccp = start;
1067 return FAIL;
1068 }
1069
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1072 properly. E.g.,
1073
1074 .i32.i32.s16
1075 .s32.f32
1076 .u16
1077
1078 Can all be legally parsed by this function.
1079
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1083
1084 static int
1085 parse_neon_type (struct neon_type *type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 if (type)
1090 type->elems = 0;
1091
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1093 {
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1096
1097 if (*ptr != '.')
1098 break;
1099
1100 ptr++;
1101
1102 /* Just a size without an explicit type. */
1103 if (ISDIGIT (*ptr))
1104 goto parsesize;
1105
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1113 default:
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1115 return FAIL;
1116 }
1117
1118 ptr++;
1119
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1122 thissize = 32;
1123 else
1124 {
1125 parsesize:
1126 thissize = strtoul (ptr, &ptr, 10);
1127
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1129 && thissize != 64)
1130 {
1131 as_bad (_("bad size %d in type specifier"), thissize);
1132 return FAIL;
1133 }
1134 }
1135
1136 if (type)
1137 {
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1140 type->elems++;
1141 }
1142 }
1143
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1146 return FAIL;
1147
1148 *str = ptr;
1149
1150 return SUCCESS;
1151 }
1152
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1157
1158 static void
1159 first_error (const char *err)
1160 {
1161 if (!inst.error)
1162 inst.error = err;
1163 }
1164
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1166 static int
1167 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1168 {
1169 char *str = *ccp;
1170 struct neon_type optype;
1171
1172 if (*str == '.')
1173 {
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1175 {
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1178 else
1179 {
1180 first_error (_("only one type should be specified for operand"));
1181 return FAIL;
1182 }
1183 }
1184 else
1185 {
1186 first_error (_("vector type expected"));
1187 return FAIL;
1188 }
1189 }
1190 else
1191 return FAIL;
1192
1193 *ccp = str;
1194
1195 return SUCCESS;
1196 }
1197
1198 /* Special meanings for indices (which have a range of 0-7), which will fit into
1199 a 4-bit integer. */
1200
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1203
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1208
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an undefined type and index; filled in below from the
     alias (if any) and from explicit ".type"/"[index]" suffixes.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      /* Only consume input on success; ALTREG is FAIL otherwise.  */
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism for Neon D and Q registers. */
  if (type == REG_TYPE_NDQ
      && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
    type = reg->type;

  if (type != reg->type)
    return FAIL;

  /* Aliases created with .dn/.qn may already carry type/index info.  */
  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      /* An explicit ".type" suffix may not override a typed alias.  */
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      /* Scalar (indexed) syntax is only available on D registers.  */
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      /* An explicit "[index]" may not override an indexed alias.  */
      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes; otherwise a constant lane index.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  /* Report the possibly-narrowed register type back to the caller.  */
  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1305
1306 /* Like arm_reg_parse, but allow allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1312 */
1313
1314 static int
1315 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1317 {
1318 struct neon_typed_alias atype;
1319 char *str = *ccp;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1321
1322 if (reg == FAIL)
1323 return FAIL;
1324
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1327 {
1328 first_error (_("register operand expected, but got scalar"));
1329 return FAIL;
1330 }
1331
1332 if (vectype)
1333 *vectype = atype.eltype;
1334
1335 *ccp = str;
1336
1337 return reg;
1338 }
1339
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1342
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1346
1347 static int
1348 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1349 {
1350 int reg;
1351 char *str = *ccp;
1352 struct neon_typed_alias atype;
1353
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1355
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1357 return FAIL;
1358
1359 if (atype.index == NEON_ALL_LANES)
1360 {
1361 first_error (_("scalar must have an index"));
1362 return FAIL;
1363 }
1364 else if (atype.index >= 64 / elsize)
1365 {
1366 first_error (_("scalar index out of range"));
1367 return FAIL;
1368 }
1369
1370 if (type)
1371 *type = atype.eltype;
1372
1373 *ccp = str;
1374
1375 return reg * 16 + atype.index;
1376 }
1377
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1379 static long
1380 parse_reg_list (char ** strp)
1381 {
1382 char * str = * strp;
1383 long range = 0;
1384 int another_range;
1385
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1387 do
1388 {
1389 another_range = 0;
1390
1391 if (*str == '{')
1392 {
1393 int in_range = 0;
1394 int cur_reg = -1;
1395
1396 str++;
1397 do
1398 {
1399 int reg;
1400
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1402 {
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1404 return FAIL;
1405 }
1406
1407 if (in_range)
1408 {
1409 int i;
1410
1411 if (reg <= cur_reg)
1412 {
1413 first_error (_("bad range in register list"));
1414 return FAIL;
1415 }
1416
1417 for (i = cur_reg + 1; i < reg; i++)
1418 {
1419 if (range & (1 << i))
1420 as_tsktsk
1421 (_("Warning: duplicated register (r%d) in register list"),
1422 i);
1423 else
1424 range |= 1 << i;
1425 }
1426 in_range = 0;
1427 }
1428
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1431 reg);
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1434
1435 range |= 1 << reg;
1436 cur_reg = reg;
1437 }
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1440 str--;
1441
1442 if (*str++ != '}')
1443 {
1444 first_error (_("missing `}'"));
1445 return FAIL;
1446 }
1447 }
1448 else
1449 {
1450 expressionS expr;
1451
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1453 return FAIL;
1454
1455 if (expr.X_op == O_constant)
1456 {
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1459 {
1460 inst.error = _("invalid register mask");
1461 return FAIL;
1462 }
1463
1464 if ((range & expr.X_add_number) != 0)
1465 {
1466 int regno = range & expr.X_add_number;
1467
1468 regno &= -regno;
1469 regno = (1 << regno) - 1;
1470 as_tsktsk
1471 (_("Warning: duplicated register (r%d) in register list"),
1472 regno);
1473 }
1474
1475 range |= expr.X_add_number;
1476 }
1477 else
1478 {
1479 if (inst.reloc.type != 0)
1480 {
1481 inst.error = _("expression too complex");
1482 return FAIL;
1483 }
1484
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1488 }
1489 }
1490
1491 if (*str == '|' || *str == '+')
1492 {
1493 str++;
1494 another_range = 1;
1495 }
1496 }
1497 while (another_range);
1498
1499 *strp = str;
1500 return range;
1501 }
1502
1503 /* Types of registers in a list. */
1504
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (REG_TYPE_VFS).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers (REG_TYPE_VFD).  */
  REGLIST_NEON_D	/* Neon D registers, Q syntax accepted (REG_TYPE_NDQ).  */
};
1511
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1519 some cases, e.g.:
1520 vtbl.8 d3,d4,d5
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1525 bug. */
1526
1527 static int
1528 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1529 {
1530 int base_reg;
1531 int new_base;
1532 enum arm_reg_type regtype = 0;
1533 int max_regs = 0;
1534 int count = 0;
1535 int warned = 0;
1536 unsigned long mask = 0;
1537 int i;
1538
1539 if (**str != '{')
1540 {
1541 inst.error = _("expecting {");
1542 return FAIL;
1543 }
1544
1545 (*str)++;
1546
1547 switch (etype)
1548 {
1549 case REGLIST_VFP_S:
1550 regtype = REG_TYPE_VFS;
1551 max_regs = 32;
1552 break;
1553
1554 case REGLIST_VFP_D:
1555 regtype = REG_TYPE_VFD;
1556 /* VFPv3 allows 32 D registers. */
1557 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1558 {
1559 max_regs = 32;
1560 if (thumb_mode)
1561 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1562 fpu_vfp_ext_v3);
1563 else
1564 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1565 fpu_vfp_ext_v3);
1566 }
1567 else
1568 max_regs = 16;
1569 break;
1570
1571 case REGLIST_NEON_D:
1572 regtype = REG_TYPE_NDQ;
1573 max_regs = 32;
1574 break;
1575 }
1576
1577 base_reg = max_regs;
1578
1579 do
1580 {
1581 int setmask = 1, addregs = 1;
1582
1583 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1584
1585 if (new_base == FAIL)
1586 {
1587 first_error (_(reg_expected_msgs[regtype]));
1588 return FAIL;
1589 }
1590
1591 /* Note: a value of 2 * n is returned for the register Q<n>. */
1592 if (regtype == REG_TYPE_NQ)
1593 {
1594 setmask = 3;
1595 addregs = 2;
1596 }
1597
1598 if (new_base < base_reg)
1599 base_reg = new_base;
1600
1601 if (mask & (setmask << new_base))
1602 {
1603 first_error (_("invalid register list"));
1604 return FAIL;
1605 }
1606
1607 if ((mask >> new_base) != 0 && ! warned)
1608 {
1609 as_tsktsk (_("register list not in ascending order"));
1610 warned = 1;
1611 }
1612
1613 mask |= setmask << new_base;
1614 count += addregs;
1615
1616 if (**str == '-') /* We have the start of a range expression */
1617 {
1618 int high_range;
1619
1620 (*str)++;
1621
1622 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1623 == FAIL)
1624 {
1625 inst.error = gettext (reg_expected_msgs[regtype]);
1626 return FAIL;
1627 }
1628
1629 if (regtype == REG_TYPE_NQ)
1630 high_range = high_range + 1;
1631
1632 if (high_range <= new_base)
1633 {
1634 inst.error = _("register range not in ascending order");
1635 return FAIL;
1636 }
1637
1638 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1639 {
1640 if (mask & (setmask << new_base))
1641 {
1642 inst.error = _("invalid register list");
1643 return FAIL;
1644 }
1645
1646 mask |= setmask << new_base;
1647 count += addregs;
1648 }
1649 }
1650 }
1651 while (skip_past_comma (str) != FAIL);
1652
1653 (*str)++;
1654
1655 /* Sanity check -- should have raised a parse error above. */
1656 if (count == 0 || count > max_regs)
1657 abort ();
1658
1659 *pbase = base_reg;
1660
1661 /* Final test -- the registers must be consecutive. */
1662 mask >>= base_reg;
1663 for (i = 0; i < count; i++)
1664 {
1665 if ((mask & (1u << i)) == 0)
1666 {
1667 inst.error = _("non-contiguous register range");
1668 return FAIL;
1669 }
1670 }
1671
1672 return count;
1673 }
1674
1675 /* True if two alias types are the same. */
1676
1677 static int
1678 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1679 {
1680 if (!a && !b)
1681 return 1;
1682
1683 if (!a || !b)
1684 return 0;
1685
1686 if (a->defined != b->defined)
1687 return 0;
1688
1689 if ((a->defined & NTA_HASTYPE) != 0
1690 && (a->eltype.type != b->eltype.type
1691 || a->eltype.size != b->eltype.size))
1692 return 0;
1693
1694 if ((a->defined & NTA_HASINDEX) != 0
1695 && (a->index != b->index))
1696 return 0;
1697
1698 return 1;
1699 }
1700
1701 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1702 The base register is put in *PBASE.
1703 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1704 the return value.
1705 The register stride (minus one) is put in bit 4 of the return value.
1706 Bits [6:5] encode the list length (minus one).
1707 The type of the list elements is put in *ELTYPE, if non-NULL. */
1708
1709 #define NEON_LANE(X) ((X) & 0xf)
1710 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1711 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1712
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* Register stride; -1 until determined.  */
  int count = 0;	/* D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;	/* D registers contributed per parsed register.  */
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  /* The braces may be omitted around a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: establish the base, the stride implied by a
	     Q register, and the reference type for the whole list.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* The second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established progression.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every element must carry the same type/index info as the first.  */
      if (!neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (!neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Add all D registers the range covers, both endpoints
	     included (the usual count++ below is skipped by the
	     continue).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Indexed and non-indexed elements may not be mixed.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described by the NEON_LANE,
     NEON_REG_STRIDE and NEON_REGLIST_LENGTH accessors above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
1867
1868 /* Parse an explicit relocation suffix on an expression. This is
1869 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1870 arm_reloc_hsh contains no entries, so this function can only
1871 succeed if there is no () after the word. Returns -1 on error,
1872 BFD_RELOC_UNUSED if there wasn't any suffix. */
1873 static int
1874 parse_reloc (char **str)
1875 {
1876 struct reloc_entry *r;
1877 char *p, *q;
1878
1879 if (**str != '(')
1880 return BFD_RELOC_UNUSED;
1881
1882 p = *str + 1;
1883 q = p;
1884
1885 while (*q && *q != ')' && *q != ',')
1886 q++;
1887 if (*q != ')')
1888 return -1;
1889
1890 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1891 return -1;
1892
1893 *str = q + 1;
1894 return r->reloc;
1895 }
1896
1897 /* Directives: register aliases. */
1898
1899 static struct reg_entry *
1900 insert_reg_alias (char *str, int number, int type)
1901 {
1902 struct reg_entry *new;
1903 const char *name;
1904
1905 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1906 {
1907 if (new->builtin)
1908 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1909
1910 /* Only warn about a redefinition if it's not defined as the
1911 same register. */
1912 else if (new->number != number || new->type != type)
1913 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1914
1915 return 0;
1916 }
1917
1918 name = xstrdup (str);
1919 new = xmalloc (sizeof (struct reg_entry));
1920
1921 new->name = name;
1922 new->number = number;
1923 new->type = type;
1924 new->builtin = FALSE;
1925 new->neon = NULL;
1926
1927 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1928 abort ();
1929
1930 return new;
1931 }
1932
1933 static void
1934 insert_neon_reg_alias (char *str, int number, int type,
1935 struct neon_typed_alias *atype)
1936 {
1937 struct reg_entry *reg = insert_reg_alias (str, number, type);
1938
1939 if (!reg)
1940 {
1941 first_error (_("attempt to redefine typed alias"));
1942 return;
1943 }
1944
1945 if (atype)
1946 {
1947 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1948 *reg->neon = *atype;
1949 }
1950 }
1951
1952 /* Look for the .req directive. This is of the form:
1953
1954 new_register_name .req existing_register_name
1955
1956 If we find one, or if it looks sufficiently like one that we want to
1957 handle any error here, return non-zero. Otherwise return zero. */
1958
static int
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return 0;

  oldname += 6;
  if (*oldname == '\0')
    return 0;

  old = hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* It looked like a .req, so claim the line even on failure.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return 1;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Stack copy of the alias name; insert_reg_alias duplicates it
     before storing, so the alloca lifetime is sufficient.  */
  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOUPPER (*p);

  /* Only add the case variants when they differ from the name as
     written.  */
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  return 1;
}
2016
2017 /* Create a Neon typed/indexed register alias using directives, e.g.:
2018 X .dn d5.s32[1]
2019 Y .qn 6.s16
2020 Z .dn d7
2021 T .dn Z[0]
2022 These typed registers can be used instead of the types specified after the
2023 Neon mnemonic, so long as all operands given have types. Types can also be
2024 specified directly, e.g.:
2025 vadd d0.s32, d1.s32, d2.s32
2026 */
2027
static int
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return 0;

  p += 5;

  if (*p == '\0')
    return 0;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return 0;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return 0;
	}
      basereg = &mybasereg;
      /* Q<n> is represented as D register number 2*n internally.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return 0;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return 0;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return 0;
	}
    }

  /* NUL-terminated stack copy of the alias name; insert_neon_reg_alias
     duplicates it before storing.  */
  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return 1;
}
2158
2159 /* Should never be called, as .req goes between the alias and the
2160 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req belongs between the alias and the register name, so a .req at
     the start of a line (which is how we got here) is always wrong.  */
  as_bad (_("invalid syntax for .req directive"));
}
2166
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .dn belongs after the alias name (handled by
     create_neon_reg_alias); at line start it is a syntax error.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2172
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .qn belongs after the alias name (handled by
     create_neon_reg_alias); at line start it is a syntax error.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2178
2179 /* The .unreq directive deletes an alias which was previously defined
2180 by .req. For example:
2181
2182 my_alias .req r11
2183 .unreq my_alias */
2184
2185 static void
2186 s_unreq (int a ATTRIBUTE_UNUSED)
2187 {
2188 char * name;
2189 char saved_char;
2190
2191 name = input_line_pointer;
2192
2193 while (*input_line_pointer != 0
2194 && *input_line_pointer != ' '
2195 && *input_line_pointer != '\n')
2196 ++input_line_pointer;
2197
2198 saved_char = *input_line_pointer;
2199 *input_line_pointer = 0;
2200
2201 if (!*name)
2202 as_bad (_("invalid syntax for .unreq directive"));
2203 else
2204 {
2205 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2206
2207 if (!reg)
2208 as_bad (_("unknown register alias '%s'"), name);
2209 else if (reg->builtin)
2210 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2211 name);
2212 else
2213 {
2214 hash_delete (arm_reg_hsh, name);
2215 free ((char *) reg->name);
2216 if (reg->neon)
2217 free (reg->neon);
2218 free (reg);
2219 }
2220 }
2221
2222 *input_line_pointer = saved_char;
2223 demand_empty_rest_of_line ();
2224 }
2225
2226 /* Directives: Instruction set selection. */
2227
2228 #ifdef OBJ_ELF
2229 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2230 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2231 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2232 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2233
2234 static enum mstate mapstate = MAP_UNDEFINED;
2235
static void
mapping_state (enum mstate state)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  mapstate = state;

  /* Select the mapping symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    case MAP_UNDEFINED:
      return;
    default:
      abort ();
    }

  /* Record the state in the current segment as well as globally.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  /* Emit a local mapping symbol at the current output position.  */
  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark $a/$t with the ARM/Thumb and interwork annotations; $d needs
     no further marking.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      return;
    }
}
2295 #else
2296 #define mapping_state(x) /* nothing */
2297 #endif
2298
2299 /* Find the real, Thumb encoded start of a Thumb function. */
2300
2301 static symbolS *
2302 find_real_start (symbolS * symbolP)
2303 {
2304 char * real_start;
2305 const char * name = S_GET_NAME (symbolP);
2306 symbolS * new_target;
2307
2308 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2309 #define STUB_NAME ".real_start_of"
2310
2311 if (name == NULL)
2312 abort ();
2313
2314 /* The compiler may generate BL instructions to local labels because
2315 it needs to perform a branch to a far away location. These labels
2316 do not have a corresponding ".real_start_of" label. We check
2317 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2318 the ".real_start_of" convention for nonlocal branches. */
2319 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2320 return symbolP;
2321
2322 real_start = ACONCAT ((STUB_NAME, name, NULL));
2323 new_target = symbol_find (real_start);
2324
2325 if (new_target == NULL)
2326 {
2327 as_warn ("Failed to find real start of function: %s\n", name);
2328 new_target = symbolP;
2329 }
2330
2331 return new_target;
2332 }
2333
2334 static void
2335 opcode_select (int width)
2336 {
2337 switch (width)
2338 {
2339 case 16:
2340 if (! thumb_mode)
2341 {
2342 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2343 as_bad (_("selected processor does not support THUMB opcodes"));
2344
2345 thumb_mode = 1;
2346 /* No need to force the alignment, since we will have been
2347 coming from ARM mode, which is word-aligned. */
2348 record_alignment (now_seg, 1);
2349 }
2350 mapping_state (MAP_THUMB);
2351 break;
2352
2353 case 32:
2354 if (thumb_mode)
2355 {
2356 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2357 as_bad (_("selected processor does not support ARM opcodes"));
2358
2359 thumb_mode = 0;
2360
2361 if (!need_pass_2)
2362 frag_align (2, 0, 0);
2363
2364 record_alignment (now_seg, 1);
2365 }
2366 mapping_state (MAP_ARM);
2367 break;
2368
2369 default:
2370 as_bad (_("invalid instruction size selected (%d)"), width);
2371 }
2372 }
2373
2374 static void
2375 s_arm (int ignore ATTRIBUTE_UNUSED)
2376 {
2377 opcode_select (32);
2378 demand_empty_rest_of_line ();
2379 }
2380
2381 static void
2382 s_thumb (int ignore ATTRIBUTE_UNUSED)
2383 {
2384 opcode_select (16);
2385 demand_empty_rest_of_line ();
2386 }
2387
2388 static void
2389 s_code (int unused ATTRIBUTE_UNUSED)
2390 {
2391 int temp;
2392
2393 temp = get_absolute_expression ();
2394 switch (temp)
2395 {
2396 case 16:
2397 case 32:
2398 opcode_select (temp);
2399 break;
2400
2401 default:
2402 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2403 }
2404 }
2405
2406 static void
2407 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2408 {
2409 /* If we are not already in thumb mode go into it, EVEN if
2410 the target processor does not support thumb instructions.
2411 This is used by gcc/config/arm/lib1funcs.asm for example
2412 to compile interworking support functions even if the
2413 target processor should not support interworking. */
2414 if (! thumb_mode)
2415 {
2416 thumb_mode = 2;
2417 record_alignment (now_seg, 1);
2418 }
2419
2420 demand_empty_rest_of_line ();
2421 }
2422
2423 static void
2424 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2425 {
2426 s_thumb (0);
2427
2428 /* The following label is the name/address of the start of a Thumb function.
2429 We need to know this for the interworking support. */
2430 label_is_thumb_function_name = TRUE;
2431 }
2432
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  EQUIV is non-zero for ".thumb_set acting as
   .equiv" semantics (redefinition of a defined symbol is an error).  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* get_symbol_end wrote a NUL over the delimiter; put it back so the
     rest of the line can still be scanned.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate NAME just for the diagnostic.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Look the symbol up, creating it if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter before parsing the value expression.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Assign the value expression to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2522
2523 /* Directives: Mode selection. */
2524
2525 /* .syntax [unified|divided] - choose the new unified syntax
2526 (same for Arm and Thumb encoding, modulo slight differences in what
2527 can be represented) or the old divergent syntax for each mode. */
2528 static void
2529 s_syntax (int unused ATTRIBUTE_UNUSED)
2530 {
2531 char *name, delim;
2532
2533 name = input_line_pointer;
2534 delim = get_symbol_end ();
2535
2536 if (!strcasecmp (name, "unified"))
2537 unified_syntax = TRUE;
2538 else if (!strcasecmp (name, "divided"))
2539 unified_syntax = FALSE;
2540 else
2541 {
2542 as_bad (_("unrecognized syntax mode \"%s\""), name);
2543 return;
2544 }
2545 *input_line_pointer = delim;
2546 demand_empty_rest_of_line ();
2547 }
2548
2549 /* Directives: sectioning and alignment. */
2550
2551 /* Same as s_align_ptwo but align 0 => align 2. */
2552
2553 static void
2554 s_align (int unused ATTRIBUTE_UNUSED)
2555 {
2556 int temp;
2557 long temp_fill;
2558 long max_alignment = 15;
2559
2560 temp = get_absolute_expression ();
2561 if (temp > max_alignment)
2562 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2563 else if (temp < 0)
2564 {
2565 as_bad (_("alignment negative. 0 assumed."));
2566 temp = 0;
2567 }
2568
2569 if (*input_line_pointer == ',')
2570 {
2571 input_line_pointer++;
2572 temp_fill = get_absolute_expression ();
2573 }
2574 else
2575 temp_fill = 0;
2576
2577 if (!temp)
2578 temp = 2;
2579
2580 /* Only make a frag if we HAVE to. */
2581 if (temp && !need_pass_2)
2582 frag_align (temp, (int) temp_fill, 0);
2583 demand_empty_rest_of_line ();
2584
2585 record_alignment (now_seg, temp);
2586 }
2587
2588 static void
2589 s_bss (int ignore ATTRIBUTE_UNUSED)
2590 {
2591 /* We don't support putting frags in the BSS segment, we fake it by
2592 marking in_bss, then looking at s_skip for clues. */
2593 subseg_set (bss_section, 0);
2594 demand_empty_rest_of_line ();
2595 mapping_state (MAP_DATA);
2596 }
2597
2598 static void
2599 s_even (int ignore ATTRIBUTE_UNUSED)
2600 {
2601 /* Never make frag if expect extra pass. */
2602 if (!need_pass_2)
2603 frag_align (1, 0, 0);
2604
2605 record_alignment (now_seg, 1);
2606
2607 demand_empty_rest_of_line ();
2608 }
2609
2610 /* Directives: Literal pools. */
2611
2612 static literal_pool *
2613 find_literal_pool (void)
2614 {
2615 literal_pool * pool;
2616
2617 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2618 {
2619 if (pool->section == now_seg
2620 && pool->sub_section == now_subseg)
2621 break;
2622 }
2623
2624 return pool;
2625 }
2626
2627 static literal_pool *
2628 find_or_make_literal_pool (void)
2629 {
2630 /* Next literal pool ID number. */
2631 static unsigned int latest_pool_num = 1;
2632 literal_pool * pool;
2633
2634 pool = find_literal_pool ();
2635
2636 if (pool == NULL)
2637 {
2638 /* Create a new pool. */
2639 pool = xmalloc (sizeof (* pool));
2640 if (! pool)
2641 return NULL;
2642
2643 pool->next_free_entry = 0;
2644 pool->section = now_seg;
2645 pool->sub_section = now_subseg;
2646 pool->next = list_of_pools;
2647 pool->symbol = NULL;
2648
2649 /* Add it to the list. */
2650 list_of_pools = pool;
2651 }
2652
2653 /* New pools, and emptied pools, will have a NULL symbol. */
2654 if (pool->symbol == NULL)
2655 {
2656 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2657 (valueT) 0, &zero_address_frag);
2658 pool->id = latest_pool_num ++;
2659 }
2660
2661 /* Done. */
2662 return pool;
2663 }
2664
2665 /* Add the literal in the global 'inst'
2666 structure to the relevent literal pool. */
2667
2668 static int
2669 add_to_lit_pool (void)
2670 {
2671 literal_pool * pool;
2672 unsigned int entry;
2673
2674 pool = find_or_make_literal_pool ();
2675
2676 /* Check if this literal value is already in the pool. */
2677 for (entry = 0; entry < pool->next_free_entry; entry ++)
2678 {
2679 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2680 && (inst.reloc.exp.X_op == O_constant)
2681 && (pool->literals[entry].X_add_number
2682 == inst.reloc.exp.X_add_number)
2683 && (pool->literals[entry].X_unsigned
2684 == inst.reloc.exp.X_unsigned))
2685 break;
2686
2687 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2688 && (inst.reloc.exp.X_op == O_symbol)
2689 && (pool->literals[entry].X_add_number
2690 == inst.reloc.exp.X_add_number)
2691 && (pool->literals[entry].X_add_symbol
2692 == inst.reloc.exp.X_add_symbol)
2693 && (pool->literals[entry].X_op_symbol
2694 == inst.reloc.exp.X_op_symbol))
2695 break;
2696 }
2697
2698 /* Do we need to create a new entry? */
2699 if (entry == pool->next_free_entry)
2700 {
2701 if (entry >= MAX_LITERAL_POOL_SIZE)
2702 {
2703 inst.error = _("literal pool overflow");
2704 return FAIL;
2705 }
2706
2707 pool->literals[entry] = inst.reloc.exp;
2708 pool->next_free_entry += 1;
2709 }
2710
2711 inst.reloc.exp.X_op = O_symbol;
2712 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2713 inst.reloc.exp.X_add_symbol = pool->symbol;
2714
2715 return SUCCESS;
2716 }
2717
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.
   Fills in SYMBOLP with a copy of NAME, SEGMENT, value VALU and owning
   fragment FRAG, then appends it to the symbol chain and runs the
   new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns stable storage.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table has been frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Run the object-format (and, if defined, target) new-symbol hooks.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2768
2769
2770 static void
2771 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2772 {
2773 unsigned int entry;
2774 literal_pool * pool;
2775 char sym_name[20];
2776
2777 pool = find_literal_pool ();
2778 if (pool == NULL
2779 || pool->symbol == NULL
2780 || pool->next_free_entry == 0)
2781 return;
2782
2783 mapping_state (MAP_DATA);
2784
2785 /* Align pool as you have word accesses.
2786 Only make a frag if we have to. */
2787 if (!need_pass_2)
2788 frag_align (2, 0, 0);
2789
2790 record_alignment (now_seg, 2);
2791
2792 sprintf (sym_name, "$$lit_\002%x", pool->id);
2793
2794 symbol_locate (pool->symbol, sym_name, now_seg,
2795 (valueT) frag_now_fix (), frag_now);
2796 symbol_table_insert (pool->symbol);
2797
2798 ARM_SET_THUMB (pool->symbol, thumb_mode);
2799
2800 #if defined OBJ_COFF || defined OBJ_ELF
2801 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2802 #endif
2803
2804 for (entry = 0; entry < pool->next_free_entry; entry ++)
2805 /* First output the expression in the instruction to the pool. */
2806 emit_expr (&(pool->literals[entry]), 4); /* .word */
2807
2808 /* Mark the pool as empty. */
2809 pool->next_free_entry = 0;
2810 pool->symbol = NULL;
2811 }
2812
2813 #ifdef OBJ_ELF
2814 /* Forward declarations for functions below, in the MD interface
2815 section. */
2816 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2817 static valueT create_unwind_entry (int);
2818 static void start_unwind_section (const segT, int);
2819 static void add_unwind_opcode (valueT, int);
2820 static void flush_pending_unwind (void);
2821
2822 /* Directives: Data. */
2823
/* Emit NBYTES-wide data items (.word etc.) for ELF, allowing an
   optional relocation specifier such as "(got)" after a symbol.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Mark the bytes emitted here as data for the mapping symbols.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol may be followed by a relocation specifier.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No specifier present: plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  /* Save the consumed text, splice the relocation
		     specifier out of the input buffer in place, ...  */
		  char *save_buf = alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  /* ... re-parse the full expression without it, then
		     restore the original line text.  */
		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* The fixup covers the last SIZE bytes of the
		     NBYTES-wide field.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
2911
2912
2913 /* Parse a .rel31 directive. */
2914
2915 static void
2916 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2917 {
2918 expressionS exp;
2919 char *p;
2920 valueT highbit;
2921
2922 highbit = 0;
2923 if (*input_line_pointer == '1')
2924 highbit = 0x80000000;
2925 else if (*input_line_pointer != '0')
2926 as_bad (_("expected 0 or 1"));
2927
2928 input_line_pointer++;
2929 if (*input_line_pointer != ',')
2930 as_bad (_("missing comma"));
2931 input_line_pointer++;
2932
2933 #ifdef md_flush_pending_output
2934 md_flush_pending_output ();
2935 #endif
2936
2937 #ifdef md_cons_align
2938 md_cons_align (4);
2939 #endif
2940
2941 mapping_state (MAP_DATA);
2942
2943 expression (&exp);
2944
2945 p = frag_more (4);
2946 md_number_to_chars (p, highbit, 4);
2947 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2948 BFD_RELOC_ARM_PREL31);
2949
2950 demand_empty_rest_of_line ();
2951 }
2952
2953 /* Directives: AEABI stack-unwind tables. */
2954
2955 /* Parse an unwind_fnstart directive. Simply records the current location. */
2956
2957 static void
2958 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2959 {
2960 demand_empty_rest_of_line ();
2961 /* Mark the start of the function. */
2962 unwind.proc_start = expr_build_dot ();
2963
2964 /* Reset the rest of the unwind info. */
2965 unwind.opcode_count = 0;
2966 unwind.table_entry = NULL;
2967 unwind.personality_routine = NULL;
2968 unwind.personality_index = -1;
2969 unwind.frame_size = 0;
2970 unwind.fp_offset = 0;
2971 unwind.fp_reg = 13;
2972 unwind.fp_used = 0;
2973 unwind.sp_restored = 0;
2974 }
2975
2976
2977 /* Parse a handlerdata directive. Creates the exception handling table entry
2978 for the function. */
2979
2980 static void
2981 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2982 {
2983 demand_empty_rest_of_line ();
2984 if (unwind.table_entry)
2985 as_bad (_("dupicate .handlerdata directive"));
2986
2987 create_unwind_entry (1);
2988 }
2989
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;

  demand_empty_rest_of_line ();

  /* Add eh table entry.  VAL is non-zero when create_unwind_entry was
     able to fold the whole entry into one word (no separate table
     entry needed); otherwise unwind.table_entry names the entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] = {
	"__aeabi_unwind_cpp_pr0",
	"__aeabi_unwind_cpp_pr1",
	"__aeabi_unwind_cpp_pr2"
      };
      /* A zero-size BFD_RELOC_NONE fixup merely records the reference
	 for the linker; it emits no bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
}
3047
3048
3049 /* Parse an unwind_cantunwind directive. */
3050
3051 static void
3052 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3053 {
3054 demand_empty_rest_of_line ();
3055 if (unwind.personality_routine || unwind.personality_index != -1)
3056 as_bad (_("personality routine specified for cantunwind frame"));
3057
3058 unwind.personality_index = -2;
3059 }
3060
3061
3062 /* Parse a personalityindex directive. */
3063
3064 static void
3065 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3066 {
3067 expressionS exp;
3068
3069 if (unwind.personality_routine || unwind.personality_index != -1)
3070 as_bad (_("duplicate .personalityindex directive"));
3071
3072 expression (&exp);
3073
3074 if (exp.X_op != O_constant
3075 || exp.X_add_number < 0 || exp.X_add_number > 15)
3076 {
3077 as_bad (_("bad personality routine number"));
3078 ignore_rest_of_line ();
3079 return;
3080 }
3081
3082 unwind.personality_index = exp.X_add_number;
3083
3084 demand_empty_rest_of_line ();
3085 }
3086
3087
3088 /* Parse a personality directive. */
3089
3090 static void
3091 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3092 {
3093 char *name, *p, c;
3094
3095 if (unwind.personality_routine || unwind.personality_index != -1)
3096 as_bad (_("duplicate .personality directive"));
3097
3098 name = input_line_pointer;
3099 c = get_symbol_end ();
3100 p = input_line_pointer;
3101 unwind.personality_routine = symbol_find_or_make (name);
3102 *p = c;
3103 demand_empty_rest_of_line ();
3104 }
3105
3106
/* Parse a directive saving core registers.  Converts the parsed
   register list into pop opcodes (short form for r4-r11/r14 runs,
   long form otherwise) and updates the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bitmask, one bit per core register.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3182
3183
3184 /* Parse a directive saving FPA registers. */
3185
3186 static void
3187 s_arm_unwind_save_fpa (int reg)
3188 {
3189 expressionS exp;
3190 int num_regs;
3191 valueT op;
3192
3193 /* Get Number of registers to transfer. */
3194 if (skip_past_comma (&input_line_pointer) != FAIL)
3195 expression (&exp);
3196 else
3197 exp.X_op = O_illegal;
3198
3199 if (exp.X_op != O_constant)
3200 {
3201 as_bad (_("expected , <constant>"));
3202 ignore_rest_of_line ();
3203 return;
3204 }
3205
3206 num_regs = exp.X_add_number;
3207
3208 if (num_regs < 1 || num_regs > 4)
3209 {
3210 as_bad (_("number of registers must be in the range [1:4]"));
3211 ignore_rest_of_line ();
3212 return;
3213 }
3214
3215 demand_empty_rest_of_line ();
3216
3217 if (reg == 4)
3218 {
3219 /* Short form. */
3220 op = 0xb4 | (num_regs - 1);
3221 add_unwind_opcode (op, 1);
3222 }
3223 else
3224 {
3225 /* Long form. */
3226 op = 0xc800 | (reg << 4) | (num_regs - 1);
3227 add_unwind_opcode (op, 2);
3228 }
3229 unwind.frame_size += num_regs * 12;
3230 }
3231
3232
3233 /* Parse a directive saving VFP registers. */
3234
3235 static void
3236 s_arm_unwind_save_vfp (void)
3237 {
3238 int count;
3239 unsigned int reg;
3240 valueT op;
3241
3242 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3243 if (count == FAIL)
3244 {
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3247 return;
3248 }
3249
3250 demand_empty_rest_of_line ();
3251
3252 if (reg == 8)
3253 {
3254 /* Short form. */
3255 op = 0xb8 | (count - 1);
3256 add_unwind_opcode (op, 1);
3257 }
3258 else
3259 {
3260 /* Long form. */
3261 op = 0xb300 | (reg << 4) | (count - 1);
3262 add_unwind_opcode (op, 2);
3263 }
3264 unwind.frame_size += count * 8 + 4;
3265 }
3266
3267
3268 /* Parse a directive saving iWMMXt data registers. */
3269
3270 static void
3271 s_arm_unwind_save_mmxwr (void)
3272 {
3273 int reg;
3274 int hi_reg;
3275 int i;
3276 unsigned mask = 0;
3277 valueT op;
3278
3279 if (*input_line_pointer == '{')
3280 input_line_pointer++;
3281
3282 do
3283 {
3284 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3285
3286 if (reg == FAIL)
3287 {
3288 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3289 goto error;
3290 }
3291
3292 if (mask >> reg)
3293 as_tsktsk (_("register list not in ascending order"));
3294 mask |= 1 << reg;
3295
3296 if (*input_line_pointer == '-')
3297 {
3298 input_line_pointer++;
3299 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3300 if (hi_reg == FAIL)
3301 {
3302 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3303 goto error;
3304 }
3305 else if (reg >= hi_reg)
3306 {
3307 as_bad (_("bad register range"));
3308 goto error;
3309 }
3310 for (; reg < hi_reg; reg++)
3311 mask |= 1 << reg;
3312 }
3313 }
3314 while (skip_past_comma (&input_line_pointer) != FAIL);
3315
3316 if (*input_line_pointer == '}')
3317 input_line_pointer++;
3318
3319 demand_empty_rest_of_line ();
3320
3321 /* Generate any deferred opcodes because we're going to be looking at
3322 the list. */
3323 flush_pending_unwind ();
3324
3325 for (i = 0; i < 16; i++)
3326 {
3327 if (mask & (1 << i))
3328 unwind.frame_size += 8;
3329 }
3330
3331 /* Attempt to combine with a previous opcode. We do this because gcc
3332 likes to output separate unwind directives for a single block of
3333 registers. */
3334 if (unwind.opcode_count > 0)
3335 {
3336 i = unwind.opcodes[unwind.opcode_count - 1];
3337 if ((i & 0xf8) == 0xc0)
3338 {
3339 i &= 7;
3340 /* Only merge if the blocks are contiguous. */
3341 if (i < 6)
3342 {
3343 if ((mask & 0xfe00) == (1 << 9))
3344 {
3345 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3346 unwind.opcode_count--;
3347 }
3348 }
3349 else if (i == 6 && unwind.opcode_count >= 2)
3350 {
3351 i = unwind.opcodes[unwind.opcode_count - 2];
3352 reg = i >> 4;
3353 i &= 0xf;
3354
3355 op = 0xffff << (reg - 1);
3356 if (reg > 0
3357 || ((mask & op) == (1u << (reg - 1))))
3358 {
3359 op = (1 << (reg + i + 1)) - 1;
3360 op &= ~((1 << reg) - 1);
3361 mask |= op;
3362 unwind.opcode_count -= 2;
3363 }
3364 }
3365 }
3366 }
3367
3368 hi_reg = 15;
3369 /* We want to generate opcodes in the order the registers have been
3370 saved, ie. descending order. */
3371 for (reg = 15; reg >= -1; reg--)
3372 {
3373 /* Save registers in blocks. */
3374 if (reg < 0
3375 || !(mask & (1 << reg)))
3376 {
3377 /* We found an unsaved reg. Generate opcodes to save the
3378 preceeding block. */
3379 if (reg != hi_reg)
3380 {
3381 if (reg == 9)
3382 {
3383 /* Short form. */
3384 op = 0xc0 | (hi_reg - 10);
3385 add_unwind_opcode (op, 1);
3386 }
3387 else
3388 {
3389 /* Long form. */
3390 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3391 add_unwind_opcode (op, 2);
3392 }
3393 }
3394 hi_reg = reg - 1;
3395 }
3396 }
3397
3398 return;
3399 error:
3400 ignore_rest_of_line ();
3401 }
3402
/* Parse a directive saving iWMMXt control registers (wcgr0-wcgr3,
   register numbers 8-11, stored as bits 0-3 of the mask).  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Build MASK from the (possibly braced, possibly ranged) list.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Rebase wcgr register numbers to bit positions 0-3.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved register occupies 4 bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
3469
3470
3471 /* Parse an unwind_save directive. */
3472
3473 static void
3474 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3475 {
3476 char *peek;
3477 struct reg_entry *reg;
3478 bfd_boolean had_brace = FALSE;
3479
3480 /* Figure out what sort of save we have. */
3481 peek = input_line_pointer;
3482
3483 if (*peek == '{')
3484 {
3485 had_brace = TRUE;
3486 peek++;
3487 }
3488
3489 reg = arm_reg_parse_multi (&peek);
3490
3491 if (!reg)
3492 {
3493 as_bad (_("register expected"));
3494 ignore_rest_of_line ();
3495 return;
3496 }
3497
3498 switch (reg->type)
3499 {
3500 case REG_TYPE_FN:
3501 if (had_brace)
3502 {
3503 as_bad (_("FPA .unwind_save does not take a register list"));
3504 ignore_rest_of_line ();
3505 return;
3506 }
3507 s_arm_unwind_save_fpa (reg->number);
3508 return;
3509
3510 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3511 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3512 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3513 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3514
3515 default:
3516 as_bad (_(".unwind_save does not support this kind of register"));
3517 ignore_rest_of_line ();
3518 }
3519 }
3520
3521
3522 /* Parse an unwind_movsp directive. */
3523
3524 static void
3525 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3526 {
3527 int reg;
3528 valueT op;
3529
3530 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3531 if (reg == FAIL)
3532 {
3533 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3534 ignore_rest_of_line ();
3535 return;
3536 }
3537 demand_empty_rest_of_line ();
3538
3539 if (reg == REG_SP || reg == REG_PC)
3540 {
3541 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3542 return;
3543 }
3544
3545 if (unwind.fp_reg != REG_SP)
3546 as_bad (_("unexpected .unwind_movsp directive"));
3547
3548 /* Generate opcode to restore the value. */
3549 op = 0x90 | reg;
3550 add_unwind_opcode (op, 1);
3551
3552 /* Record the information for later. */
3553 unwind.fp_reg = reg;
3554 unwind.fp_offset = unwind.frame_size;
3555 unwind.sp_restored = 1;
3556 }
3557
3558 /* Parse an unwind_pad directive. */
3559
3560 static void
3561 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3562 {
3563 int offset;
3564
3565 if (immediate_for_directive (&offset) == FAIL)
3566 return;
3567
3568 if (offset & 3)
3569 {
3570 as_bad (_("stack increment must be multiple of 4"));
3571 ignore_rest_of_line ();
3572 return;
3573 }
3574
3575 /* Don't generate any opcodes, just record the details for later. */
3576 unwind.frame_size += offset;
3577 unwind.pending_offset += offset;
3578
3579 demand_empty_rest_of_line ();
3580 }
3581
3582 /* Parse an unwind_setfp directive. */
3583
3584 static void
3585 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3586 {
3587 int sp_reg;
3588 int fp_reg;
3589 int offset;
3590
3591 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3592 if (skip_past_comma (&input_line_pointer) == FAIL)
3593 sp_reg = FAIL;
3594 else
3595 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3596
3597 if (fp_reg == FAIL || sp_reg == FAIL)
3598 {
3599 as_bad (_("expected <reg>, <reg>"));
3600 ignore_rest_of_line ();
3601 return;
3602 }
3603
3604 /* Optional constant. */
3605 if (skip_past_comma (&input_line_pointer) != FAIL)
3606 {
3607 if (immediate_for_directive (&offset) == FAIL)
3608 return;
3609 }
3610 else
3611 offset = 0;
3612
3613 demand_empty_rest_of_line ();
3614
3615 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3616 {
3617 as_bad (_("register must be either sp or set by a previous"
3618 "unwind_movsp directive"));
3619 return;
3620 }
3621
3622 /* Don't generate any opcodes, just record the information for later. */
3623 unwind.fp_reg = fp_reg;
3624 unwind.fp_used = 1;
3625 if (sp_reg == 13)
3626 unwind.fp_offset = unwind.frame_size - offset;
3627 else
3628 unwind.fp_offset -= offset;
3629 }
3630
3631 /* Parse an unwind_raw directive. */
3632
3633 static void
3634 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3635 {
3636 expressionS exp;
3637 /* This is an arbitrary limit. */
3638 unsigned char op[16];
3639 int count;
3640
3641 expression (&exp);
3642 if (exp.X_op == O_constant
3643 && skip_past_comma (&input_line_pointer) != FAIL)
3644 {
3645 unwind.frame_size += exp.X_add_number;
3646 expression (&exp);
3647 }
3648 else
3649 exp.X_op = O_illegal;
3650
3651 if (exp.X_op != O_constant)
3652 {
3653 as_bad (_("expected <offset>, <opcode>"));
3654 ignore_rest_of_line ();
3655 return;
3656 }
3657
3658 count = 0;
3659
3660 /* Parse the opcode. */
3661 for (;;)
3662 {
3663 if (count >= 16)
3664 {
3665 as_bad (_("unwind opcode too long"));
3666 ignore_rest_of_line ();
3667 }
3668 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3669 {
3670 as_bad (_("invalid unwind opcode"));
3671 ignore_rest_of_line ();
3672 return;
3673 }
3674 op[count++] = exp.X_add_number;
3675
3676 /* Parse the next byte. */
3677 if (skip_past_comma (&input_line_pointer) == FAIL)
3678 break;
3679
3680 expression (&exp);
3681 }
3682
3683 /* Add the opcode bytes in reverse order. */
3684 while (count--)
3685 add_unwind_opcode (op[count], 1);
3686
3687 demand_empty_rest_of_line ();
3688 }
3689
3690
/* Parse a .eabi_attribute directive: ".eabi_attribute <tag>, <value>".
   Depending on the tag, the value is an integer, a double-quoted
   string, or (for Tag_compatibility) both.  The parsed attribute is
   handed to the elf32_arm_add_eabi_attr_* routines for emission.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  bfd_boolean is_string;
  int tag;
  unsigned int i = 0;
  char *s = NULL;
  char saved_char;

  expression (& exp);
  if (exp.X_op != O_constant)
    goto bad;

  tag = exp.X_add_number;
  /* Tags 4, 5, 32 and odd tags above 32 take a string value.
     NOTE(review): presumably this mirrors the EABI attribute tag
     numbering conventions - confirm against the ABI document.  */
  if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
    is_string = 1;
  else
    is_string = 0;

  if (skip_past_comma (&input_line_pointer) == FAIL)
    goto bad;
  /* Tag 32 (Tag_compatibility) takes an integer AND a string, so the
     integer is read here even though is_string is set for it.  */
  if (tag == 32 || !is_string)
    {
      expression (& exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expected numeric constant"));
	  ignore_rest_of_line ();
	  return;
	}
      i = exp.X_add_number;
    }
  if (tag == Tag_compatibility
      && skip_past_comma (&input_line_pointer) == FAIL)
    {
      as_bad (_("expected comma"));
      ignore_rest_of_line ();
      return;
    }
  if (is_string)
    {
      /* Scan the double-quoted string in place, temporarily
	 NUL-terminating it inside the input buffer; the terminator is
	 restored below once the attribute has been recorded.  */
      skip_whitespace(input_line_pointer);
      if (*input_line_pointer != '"')
	goto bad_string;
      input_line_pointer++;
      s = input_line_pointer;
      while (*input_line_pointer && *input_line_pointer != '"')
	input_line_pointer++;
      if (*input_line_pointer != '"')
	goto bad_string;
      saved_char = *input_line_pointer;
      *input_line_pointer = 0;
    }
  else
    {
      s = NULL;
      saved_char = 0;
    }

  if (tag == Tag_compatibility)
    elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
  else if (is_string)
    elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
  else
    elf32_arm_add_eabi_attr_int (stdoutput, tag, i);

  /* Undo the in-place NUL termination and step past the closing
     quote.  */
  if (s)
    {
      *input_line_pointer = saved_char;
      input_line_pointer++;
    }
  demand_empty_rest_of_line ();
  return;
bad_string:
  as_bad (_("bad string constant"));
  ignore_rest_of_line ();
  return;
bad:
  as_bad (_("expected <tag> , <value>"));
  ignore_rest_of_line ();
}
3775 #endif /* OBJ_ELF */
3776
/* Forward declarations for handlers defined later in this file.  */
static void s_arm_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  /* ELF-only directives: data relocations and EHABI unwind tables.  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "rel31",	        s_arm_rel31,	  0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
  { 0, 0, 0 }
};
3834 \f
3835 /* Parser functions used exclusively in instruction operands. */
3836
3837 /* Generic immediate-value read function for use in insn parsing.
3838 STR points to the beginning of the immediate (the leading #);
3839 VAL receives the value; if the value is outside [MIN, MAX]
3840 issue an error. PREFIX_OPT is true if the immediate prefix is
3841 optional. */
3842
3843 static int
3844 parse_immediate (char **str, int *val, int min, int max,
3845 bfd_boolean prefix_opt)
3846 {
3847 expressionS exp;
3848 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3849 if (exp.X_op != O_constant)
3850 {
3851 inst.error = _("constant expression required");
3852 return FAIL;
3853 }
3854
3855 if (exp.X_add_number < min || exp.X_add_number > max)
3856 {
3857 inst.error = _("immediate value out of range");
3858 return FAIL;
3859 }
3860
3861 *val = exp.X_add_number;
3862 return SUCCESS;
3863 }
3864
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
   instructions.  Puts the result directly in inst.operands[i]: a plain
   constant goes in .imm; a 33..64-bit bignum is split with the low 32
   bits in .imm and the high 32 bits in .reg, with .regisimm set.
   Returns SUCCESS (advancing *STR) or FAIL.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    inst.operands[i].imm = exp.X_add_number;
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* For O_big, X_add_number is the littlenum count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
3905
/* Returns the pseudo-register number of an FPA immediate constant
   (index into fp_values, offset by 8), or FAIL if there isn't a valid
   constant here.  Three strategies are tried in turn: exact string
   match against fp_const[], a raw IEEE float via atof_ieee, and a full
   gas expression parse.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not at end of line: back out and try the next candidate.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").
     expression() reads from input_line_pointer, so save and restore it
     around the call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
3996
3997 /* Returns 1 if a number has "quarter-precision" float format
3998 0baBbbbbbc defgh000 00000000 00000000. */
3999
4000 static int
4001 is_quarter_float (unsigned imm)
4002 {
4003 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4004 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4005 }
4006
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The minus-zero case needs special handling, since it can't be encoded in the
   "quarter-precision" float format, but can nonetheless be loaded as an integer
   constant.
   On success, stores the 32-bit single-precision bit pattern in *IMMED,
   advances *CCP past the number, and returns SUCCESS; otherwise
   returns FAIL with *CCP unchanged.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&str, '#');

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* 0x80000000 is IEEE single-precision -0.0 (the special case
	 described above).  */
      if (is_quarter_float (fpword) || fpword == 0x80000000)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4045
/* Shift operands.  */

/* The five ARM shift operations; RRX is encoded as ROR #0.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry type for the arm_shift_hsh hash table: maps a shift mnemonic
   (including the ASL alias) to its shift_kind.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  Restricts which shift kinds a
   particular operand position will accept.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
4067
4068 /* Parse a <shift> specifier on an ARM data processing instruction.
4069 This has three forms:
4070
4071 (LSL|LSR|ASL|ASR|ROR) Rs
4072 (LSL|LSR|ASL|ASR|ROR) #imm
4073 RRX
4074
4075 Note that ASL is assimilated to LSL in the instruction encoding, and
4076 RRX to ROR #0 (which cannot be written as such). */
4077
4078 static int
4079 parse_shift (char **str, int i, enum parse_shift_mode mode)
4080 {
4081 const struct asm_shift_name *shift_name;
4082 enum shift_kind shift;
4083 char *s = *str;
4084 char *p = s;
4085 int reg;
4086
4087 for (p = *str; ISALPHA (*p); p++)
4088 ;
4089
4090 if (p == *str)
4091 {
4092 inst.error = _("shift expression expected");
4093 return FAIL;
4094 }
4095
4096 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4097
4098 if (shift_name == NULL)
4099 {
4100 inst.error = _("shift expression expected");
4101 return FAIL;
4102 }
4103
4104 shift = shift_name->kind;
4105
4106 switch (mode)
4107 {
4108 case NO_SHIFT_RESTRICT:
4109 case SHIFT_IMMEDIATE: break;
4110
4111 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4112 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4113 {
4114 inst.error = _("'LSL' or 'ASR' required");
4115 return FAIL;
4116 }
4117 break;
4118
4119 case SHIFT_LSL_IMMEDIATE:
4120 if (shift != SHIFT_LSL)
4121 {
4122 inst.error = _("'LSL' required");
4123 return FAIL;
4124 }
4125 break;
4126
4127 case SHIFT_ASR_IMMEDIATE:
4128 if (shift != SHIFT_ASR)
4129 {
4130 inst.error = _("'ASR' required");
4131 return FAIL;
4132 }
4133 break;
4134
4135 default: abort ();
4136 }
4137
4138 if (shift != SHIFT_RRX)
4139 {
4140 /* Whitespace can appear here if the next thing is a bare digit. */
4141 skip_whitespace (p);
4142
4143 if (mode == NO_SHIFT_RESTRICT
4144 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4145 {
4146 inst.operands[i].imm = reg;
4147 inst.operands[i].immisreg = 1;
4148 }
4149 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4150 return FAIL;
4151 }
4152 inst.operands[i].shift_kind = shift;
4153 inst.operands[i].shifted = 1;
4154 *str = p;
4155 return SUCCESS;
4156 }
4157
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): with value == 0 this shifts by 32; X_add_number
	 is an offsetT, presumably wider than 32 bits, so this is
	 defined - confirm on hosts with a 32-bit offsetT.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4228
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static int
parse_address (char **str, int i)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return FAIL;

      *str = p;
      return SUCCESS;
    }

  /* Base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed forms: [Rn, ...].  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] with optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return FAIL;
	    }
	  /* Alignment is kept in the high bits of .imm so a post-index
	     register can later be OR'd into the low bits.  */
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* [Rn, #offset]: back up over a consumed '-' so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }
	  if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	    return FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return FAIL;
	    }
	  *str = p;
	  return SUCCESS;
	}
      else
	{
	  /* Post-indexed forms: [Rn], #offset or [Rn], +/-Rm.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return FAIL;
	    }
	  else
	    {
	      /* As above: undo a consumed '-' before parsing the
		 immediate expression.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return SUCCESS;
}
4422
4423 /* Miscellaneous. */
4424
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   Accepts CPSR/SPSR with an optional _<flags> suffix, or a bare
   v7-M special-register name looked up in arm_v7m_psr_hsh.  */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Not [CS]PSR: try the v7-M special-register names.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Bare [CS]PSR defaults to the control and flag fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
4488
4489 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4490 value suitable for splatting into the AIF field of the instruction. */
4491
4492 static int
4493 parse_cps_flags (char **str)
4494 {
4495 int val = 0;
4496 int saw_a_flag = 0;
4497 char *s = *str;
4498
4499 for (;;)
4500 switch (*s++)
4501 {
4502 case '\0': case ',':
4503 goto done;
4504
4505 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4506 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4507 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4508
4509 default:
4510 inst.error = _("unrecognized CPS flag");
4511 return FAIL;
4512 }
4513
4514 done:
4515 if (saw_a_flag == 0)
4516 {
4517 inst.error = _("missing CPS flags");
4518 return FAIL;
4519 }
4520
4521 *str = s - 1;
4522 return val;
4523 }
4524
4525 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4526 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4527
4528 static int
4529 parse_endian_specifier (char **str)
4530 {
4531 int little_endian;
4532 char *s = *str;
4533
4534 if (strncasecmp (s, "BE", 2))
4535 little_endian = 0;
4536 else if (strncasecmp (s, "LE", 2))
4537 little_endian = 1;
4538 else
4539 {
4540 inst.error = _("valid endian specifiers are be or le");
4541 return FAIL;
4542 }
4543
4544 if (ISALNUM (s[2]) || s[2] == '_')
4545 {
4546 inst.error = _("valid endian specifiers are be or le");
4547 return FAIL;
4548 }
4549
4550 *str = s + 2;
4551 return little_endian;
4552 }
4553
4554 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4555 value suitable for poking into the rotate field of an sxt or sxta
4556 instruction, or FAIL on error. */
4557
4558 static int
4559 parse_ror (char **str)
4560 {
4561 int rot;
4562 char *s = *str;
4563
4564 if (strncasecmp (s, "ROR", 3) == 0)
4565 s += 3;
4566 else
4567 {
4568 inst.error = _("missing rotation field after comma");
4569 return FAIL;
4570 }
4571
4572 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4573 return FAIL;
4574
4575 switch (rot)
4576 {
4577 case 0: *str = s; return 0x0;
4578 case 8: *str = s; return 0x1;
4579 case 16: *str = s; return 0x2;
4580 case 24: *str = s; return 0x3;
4581
4582 default:
4583 inst.error = _("rotation can only be 0, 8, 16, or 24");
4584 return FAIL;
4585 }
4586 }
4587
4588 /* Parse a conditional code (from conds[] below). The value returned is in the
4589 range 0 .. 14, or FAIL. */
4590 static int
4591 parse_cond (char **str)
4592 {
4593 char *p, *q;
4594 const struct asm_cond *c;
4595
4596 p = q = *str;
4597 while (ISALPHA (*q))
4598 q++;
4599
4600 c = hash_find_n (arm_cond_hsh, p, q - p);
4601 if (!c)
4602 {
4603 inst.error = _("condition required");
4604 return FAIL;
4605 }
4606
4607 *str = q;
4608 return c->value;
4609 }
4610
4611 /* Parse an option for a barrier instruction. Returns the encoding for the
4612 option, or FAIL. */
4613 static int
4614 parse_barrier (char **str)
4615 {
4616 char *p, *q;
4617 const struct asm_barrier_opt *o;
4618
4619 p = q = *str;
4620 while (ISALPHA (*q))
4621 q++;
4622
4623 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4624 if (!o)
4625 return FAIL;
4626
4627 *str = q;
4628 return o->value;
4629 }
4630
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand: "[Rn, Rm]" or "[Rn, Rm, LSL #1]".  Both registers go into
   inst.operands[0] (.reg = base, .imm = index); the only shift allowed
   is LSL #1 (for TBH).  Returns SUCCESS or FAIL.  */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  /* The index register lives in .imm.  */
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Only "LSL #1" is a valid shift here.  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
4685
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Note particularly the abuse of ".regisimm" to signify a Neon register.
   Up to three operands may be read; this function handles setting the
   ".present" field for each operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Mark the first operand as a Neon register (see the
	     ".regisimm" note in the function comment).  */
	  inst.operands[i-1].regisimm = 1;
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  /* A quad register cannot be split across two core
	     registers.  */
	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;
	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	{
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     In ARM mode the Neon form must be unconditional (condition
	     field 0xe).  */
	  if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
	    goto bad_cond;
	}
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	{
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
	  if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
	    goto bad_cond;
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
	       != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm> */
	  if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
	    goto bad_cond;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  /* ".regisimm" marks a Neon/VFP D register, as noted above.  */
	  inst.operands[i].regisimm = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

  wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;

  bad_cond:
  first_error (_("instruction cannot be conditionalized"));
  return FAIL;
}
4853
/* Matcher codes for parse_operands.
   NOTE: the grouping of these values matters.  All optional-operand
   codes (OP_o*) must compare >= OP_FIRST_OPTIONAL; parse_operands
   relies on that ordering to decide when backtracking is allowed.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */
  OP_Iffff,	/*		   0 .. 65535 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*				1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  /* Marker for the start of the optional-operand group; see above.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
4958
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;

  /* The po_* macros below each parse one operand.  They assume the
     locals str, val, rtype and i are in scope, and either fill in
     inst.operands[i] or jump to an error / alternative label.  */

  /* Consume the single character CHR or fail the whole parse.  */
#define po_char_or_fail(chr) do {		\
  if (skip_past_char (&str, chr) == FAIL)	\
    goto bad_args;				\
} while (0)

  /* Parse a register of type REGTYPE or fail with a diagnostic.  */
#define po_reg_or_fail(regtype) do {				\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    {								\
      first_error (_(reg_expected_msgs[regtype]));		\
      goto failure;						\
    }								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
} while (0)

  /* Parse a register of type REGTYPE, or jump to LABEL to try an
     alternative interpretation of the operand.  */
#define po_reg_or_goto(regtype, label) do {			\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
} while (0)

  /* Parse an immediate constrained to [MIN, MAX]; POPT says whether a
     '#' prefix is optional.  */
#define po_imm_or_fail(min, max, popt) do {			\
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
    goto failure;						\
  inst.operands[i].imm = val;					\
} while (0)

  /* Parse a Neon scalar (Dn[x]) with element size ELSZ, or jump to
     LABEL to try an alternative interpretation.  */
#define po_scalar_or_goto(elsz, label) do {			\
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
  inst.operands[i].reg = val;					\
  inst.operands[i].isscalar = 1;				\
} while (0)

  /* Fail if EXPR (typically a sub-parser call) is nonzero.  */
#define po_misc_or_fail(expr) do {	\
  if (expr)				\
    goto failure;			\
} while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0)
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:   po_reg_or_fail (REG_TYPE_VFC);	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	/* WARNING: We can expand to two operands here. This has the potential
	   to totally confuse the backtracking mechanism! It will be OK at
	   least as long as we don't try to use optional args as well,
	   though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	    one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	    try_imm:
	    /* Immediate gets verified properly later, so accept any now.  */
	    po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	    try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
	case OP_Iffff:	 po_imm_or_fail (  0, 0xffff, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the parse stopped at the NUL that replaced '!', step
	       over it so the ',' check above still works.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Register or expression */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	  /* Two kinds of register */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (rege->type != REG_TYPE_MMXWR
		&& rege->type != REG_TYPE_MMXWC
		&& rege->type != REG_TYPE_MMXWCG)
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): the '^' suffix is recorded on operand 1
		 unconditionally, not operand i.  This is harmless only
		 if register lists are always the second operand --
		 confirm against the opcode table.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal ("unhandled operand code %d", upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.	 */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.	*/
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
5431
/* The po_* parser macros are local to parse_operands; undefine them
   all.  (Previously po_scalar_or_goto and po_misc_or_fail leaked, and
   the never-defined po_scalar_or_fail was undefined instead.)  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
5437 \f
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if true, records ERR in inst.error and returns from
   the *enclosing* function.  Only usable in functions returning void
   (the do_* encoders below).  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)
5446
5447 /* Functions for operand encoding. ARM, then Thumb. */
5448
/* Rotate the 32-bit value V left by N bits.  N == 0 is special-cased:
   the naive form would shift right by 32, which is undefined behaviour
   in C.  Arguments are parenthesized against precedence surprises;
   note that both V and N may be evaluated more than once.  */
#define rotate_left(v, n) \
  ((n) == 0 ? (v) : (((v) << (n)) | ((v) >> (32 - (n)))))
5450
5451 /* If VAL can be encoded in the immediate field of an ARM instruction,
5452 return the encoded form. Otherwise, return FAIL. */
5453
5454 static unsigned int
5455 encode_arm_immediate (unsigned int val)
5456 {
5457 unsigned int a, i;
5458
5459 for (i = 0; i < 32; i += 2)
5460 if ((a = rotate_left (val, i)) <= 0xff)
5461 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5462
5463 return FAIL;
5464 }
5465
5466 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5467 return the encoded form. Otherwise, return FAIL. */
5468 static unsigned int
5469 encode_thumb32_immediate (unsigned int val)
5470 {
5471 unsigned int a, i;
5472
5473 if (val <= 0xff)
5474 return val;
5475
5476 for (i = 1; i <= 24; i++)
5477 {
5478 a = val >> i;
5479 if ((val & ~(0xff << i)) == 0)
5480 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5481 }
5482
5483 a = val & 0xff;
5484 if (val == ((a << 16) | a))
5485 return 0x100 | a;
5486 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5487 return 0x300 | a;
5488
5489 a = val & 0xff00;
5490 if (val == ((a << 16) | a))
5491 return 0x200 | (a >> 8);
5492
5493 return FAIL;
5494 }
/* Encode a VFP SP or DP register number into inst.instruction.  POS
   selects which operand slot (and hence which bit positions) the
   register occupies.  D registers above 15 need VFPv3: using one
   either records the VFPv3 feature as used or raises an error.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Register numbers are split across a 4-bit field and a single
     extension bit.  For S registers the high four bits (reg >> 1) go
     in the field and the low bit in the extension; for D registers it
     is the reverse (reg & 15 in the field, reg >> 4 extended).  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
5549
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    /* RRX is encoded as ROR with a (zero) shift count left unset.  */
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  /* Shift amount comes from a register (bits 8-11).  */
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount; filled in at fixup time.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
5569
5570 static void
5571 encode_arm_shifter_operand (int i)
5572 {
5573 if (inst.operands[i].isreg)
5574 {
5575 inst.instruction |= inst.operands[i].reg;
5576 encode_arm_shift (i);
5577 }
5578 else
5579 inst.instruction |= INST_IMMEDIATE;
5580 }
5581
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register (Rn, bits 16-19) and the pre/post-index
   and writeback flags of operand I.  IS_T selects the "translate"
   (user-mode access) forms, which require post-indexed addressing.
   Warns when the write-back base overlaps Rd/Rt (bits 12-15).  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      /* T-form instructions imply write-back of the base.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Base register updated and equal to Rd/Rt: unpredictable for a
     load, suspicious for a store -- warn either way.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
5620
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: the I bit *set* selects a register
	 offset in mode 2.  */
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX: encoded as ROR with a zero shift count.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      /* Leave any more specific relocation already chosen alone.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
5653
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      /* Leave any more specific relocation already chosen alone.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
5683
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM.  Returns SUCCESS or FAIL (with inst.error
   set).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The 8-bit option field replaces the offset; INDEX_UP is
	 mandatory in the unindexed form.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if (thumb_mode)
    inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
  else
    inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
  return SUCCESS;
}
5737
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* The load bit lives in a different place in 32-bit Thumb opcodes
     than in 16-bit ones.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* Only loads may take an "=expr" operand.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }

  /* Rewrite as a pc-relative load from the literal pool.
     NOTE(review): the address is assumed to be operand 1 regardless
     of I -- confirm against callers.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
5819
5820 /* Functions for instruction encoding, sorted by subarchitecture.
5821 First some generics; their names are taken from the conventional
5822 bit positions for register arguments in ARM format instructions. */
5823
/* Encoder for instructions whose base opcode needs no operand fields
   filled in.  */
static void
do_noargs (void)
{
}
5828
/* Encode Rd (operand 0) in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
5834
/* Encode Rd (operand 0) in bits 12-15 and Rm (operand 1) in bits 0-3.  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
5841
/* Encode Rd (operand 0) in bits 12-15 and Rn (operand 1) in bits 16-19.  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
5848
/* Encode Rn (operand 0) in bits 16-19 and Rd (operand 1) in bits 12-15.  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
5855
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19).  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  The mask ignores the
     condition field and bit 22 (presumably the byte/word select --
     confirm against the opcode table).  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		_("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
5868
/* Encode Rd (bits 12-15), Rn (bits 16-19) and Rm (bits 0-3).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
5876
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19).  */
static void
do_rm_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
5884
/* Encode a single immediate (operand 0) into the low bits of the
   instruction.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
5890
/* Encode Rd (bits 12-15) plus a coprocessor address (operand 1);
   writeback and unindexed forms are both permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
5897
5898 /* ARM instructions, in alphabetical order by function name (except
5899 that wrapper functions appear immediately after the function they
5900 wrap). */
5901
5902 /* This is a pseudo-op of the form "adr rd, label" to be converted
5903 into a relative address of the form "add rd, pc, #label-.-8". */
5904
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the pipeline offset: the address is relative to
     ".+8" as described in the pseudo-op comment above.  */
  inst.reloc.exp.X_add_number -= 8;
}
5916
5917 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5918 into a relative address of the form:
5919 add rd, pc, #low(label-.-8)"
5920 add rd, rd, #high(label-.-8)" */
5921
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel	       = 1;
  /* Two ADD instructions are emitted, as shown in the comment above.  */
  inst.size		       = INSN_SIZE * 2;
  /* Compensate for the ".+8" pipeline offset.  */
  inst.reloc.exp.X_add_number -= 8;
}
5934
/* Data-processing arithmetic: Rd, {Rn,} shifter_operand.  When Rn is
   omitted it defaults to Rd.  */

static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
5944
5945 static void
5946 do_barrier (void)
5947 {
5948 if (inst.operands[0].present)
5949 {
5950 constraint ((inst.instruction & 0xf0) != 0x40
5951 && inst.operands[0].imm != 0xf,
5952 "bad barrier type");
5953 inst.instruction |= inst.operands[0].imm;
5954 }
5955 else
5956 inst.instruction |= 0xf;
5957 }
5958
/* BFC Rd, #lsb, #width.  Operand 1 is the LSB, operand 2 the width.  */

static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
5970
/* BFI Rd, Rm, #lsb, #width.  "BFI Rd, #0, ..." is accepted as an
   alternative spelling of BFC.  */

static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
5990
/* SBFX/UBFX Rd, Rm, #lsb, #width.  The encoding stores width-1, not
   the MSB, in bits 16-20.  */

static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
6001
6002 /* ARM V5 breakpoint instruction (argument parse)
6003 BKPT <16 bit unsigned immediate>
6004 Instruction is not conditional.
6005 The bit pattern given in insns[] has the COND_ALWAYS condition,
6006 and it is an error if the caller tried to override that. */
6007
static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
6017
/* Select the relocation for a branch target.  A "(plt)" suffix on the
   operand forces BFD_RELOC_ARM_PLT32; otherwise DEFAULT_RELOC is used.
   The relocation is always PC-relative.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = default_reloc;
    }
  inst.reloc.pc_rel = 1;
}
6033
/* B{cond} target.  EABI v4+ uses the JUMP reloc so the linker can
   distinguish jumps from calls.  */

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6044
/* BL{cond} target.  For EABI v4+, an unconditional BL gets the CALL
   reloc (eligible for BL<->BLX conversion at link time); a conditional
   one gets the JUMP reloc.  */

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6060
6061 /* ARM V5 branch-link-exchange instruction (argument parse)
6062 BLX <target_addr> ie BLX(1)
6063 BLX{<condition>} <Rm> ie BLX(2)
6064 Unfortunately, there are two different opcodes for this mnemonic.
6065 So, the insns[].value is not used, and the code here zaps values
6066 into inst.instruction.
6067 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6068
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 0xfa000000 is the immediate-form BLX(1) opcode.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
	encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
6095
/* BX{cond} Rm.  Warn about the pointless "bx pc" form.  */

static void
do_bx (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
6104
6105
6106 /* ARM v5TEJ. Jump to Jazelle code. */
6107
static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
6116
6117 /* Co-processor data operation:
6118 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6119 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6120 static void
6121 do_cdp (void)
6122 {
6123 inst.instruction |= inst.operands[0].reg << 8;
6124 inst.instruction |= inst.operands[1].imm << 20;
6125 inst.instruction |= inst.operands[2].reg << 12;
6126 inst.instruction |= inst.operands[3].reg << 16;
6127 inst.instruction |= inst.operands[4].reg;
6128 inst.instruction |= inst.operands[5].imm << 5;
6129 }
6130
/* Comparison instructions: Rn (bits 16-19) plus a shifter operand.
   No destination register.  */

static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
6137
6138 /* Transfer between coprocessor and ARM registers.
6139 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6140 MRC2
6141 MCR{cond}
6142 MCR2
6143
6144 No special properties. */
6145
6146 static void
6147 do_co_reg (void)
6148 {
6149 inst.instruction |= inst.operands[0].reg << 8;
6150 inst.instruction |= inst.operands[1].imm << 21;
6151 inst.instruction |= inst.operands[2].reg << 12;
6152 inst.instruction |= inst.operands[3].reg << 16;
6153 inst.instruction |= inst.operands[4].reg;
6154 inst.instruction |= inst.operands[5].imm << 5;
6155 }
6156
6157 /* Transfer between coprocessor register and pair of ARM registers.
6158 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6159 MCRR2
6160 MRRC{cond}
6161 MRRC2
6162
6163 Two XScale instructions are special cases of these:
6164
6165 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6166 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6167
   Result unpredictable if Rd or Rn is R15.  */
6169
6170 static void
6171 do_co_reg2c (void)
6172 {
6173 inst.instruction |= inst.operands[0].reg << 8;
6174 inst.instruction |= inst.operands[1].imm << 4;
6175 inst.instruction |= inst.operands[2].reg << 12;
6176 inst.instruction |= inst.operands[3].reg << 16;
6177 inst.instruction |= inst.operands[4].reg;
6178 }
6179
/* CPS with interrupt-flag and mode operands: flags go to bits 6-8,
   the mode to the low bits.  */

static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  inst.instruction |= inst.operands[1].imm;
}
6186
/* DBG #option: the option value occupies the low bits of the opcode.  */

static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
6192
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it but do not generate code for it.  */
  inst.size = 0;
}
6200
/* LDM/STM and variants: base register (with optional writeback) plus a
   register-list bitmask.  A '^' on the register list selects the
   user-bank/exception-return forms (LDM_TYPE_2_OR_3).  Emits warnings
   for the architecturally UNPREDICTABLE writeback combinations.  */

static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.
	     The second test checks for any listed register below the
	     base register.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
6239
6240 /* ARMv5TE load-consecutive (argument parse)
6241 Mode is like LDRH.
6242
6243 LDRccD R, mode
6244 STRccD R, mode. */
6245
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14 is excluded because the pair would then include r15 (PC).  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register of the pair may be omitted; it defaults to
     the register following the first.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
6282
/* ARM V6 ldrex (argument parse).  Only the plain [Rn] addressing form
   with a zero offset is legal in the ARM encoding.  */

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset has been fully checked; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
6312
/* ARM V6K ldrexd: load an even/odd register pair exclusively.
   Operand 2 is the base register.  */

static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
6328
/* LDR/STR word/byte.  "ldr rd, =expr" forms may be turned into a MOV
   or a literal-pool load by move_or_literal_pool.  */

static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}
6338
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
6357
6358 /* Halfword and signed-byte load/store operations. */
6359
/* Like do_ldst but for the ARMv4 halfword/signed-byte forms, which use
   addressing mode 3.  */

static void
do_ldstv4 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
6369
/* Mode-3 analogue of do_ldstt (e.g. ldrsbt/ldrht-style "user" access).  */

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
6388
6389 /* Co-processor register load/store.
6390 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* Coprocessor number.  */
  inst.instruction |= inst.operands[1].reg << 12;  /* CRd.  */
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
6398
/* MLA/MLS Rd, Rm, Rs, Rn.  Note the non-standard field layout:
   Rd is in bits 16-19 and the accumulator Rn in bits 12-15.  */

static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6, but
     that's hard to detect at present).	 Bit 22 distinguishes mls.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("rd and rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;

}
6414
/* MOV/MVN Rd, shifter_operand.  */

static void
do_mov (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
6421
6422 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The value is in two pieces: 0:11, 16:19.  */
  inst.instruction |= (inst.operands[1].imm & 0x00000fff);
  inst.instruction |= (inst.operands[1].imm & 0x0000f000) << 4;
}
6431
/* MRS Rd, CPSR/SPSR.  Only the whole-register forms are legal; the
   SPSR bit selects which status register is read.  */

static void
do_mrs (void)
{
  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}
6442
6443 /* Two possible forms:
6444 "{C|S}PSR_<field>, Rm",
6445 "{C|S}PSR_f, #expression". */
6446
static void
do_msr (void)
{
  /* Operand 0 carries the pre-encoded PSR field mask and SPSR bit.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer encoding of the rotated immediate to a
	 fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
6460
/* MUL Rd, Rm {, Rs}.  Rs defaults to Rd when omitted.  Rd is in
   bits 16-19 for multiplies.  */

static void
do_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rd and rm should be different in mul"));
}
6473
6474 /* Long Multiply Parser
6475 UMULL RdLo, RdHi, Rm, Rs
6476 SMULL RdLo, RdHi, Rm, Rs
6477 UMLAL RdLo, RdHi, Rm, Rs
6478 SMLAL RdLo, RdHi, Rm, Rs. */
6479
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;  /* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;  /* RdHi.  */
  inst.instruction |= inst.operands[2].reg;	   /* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;   /* Rs.  */

  /* rdhi, rdlo and rm must all be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      || inst.operands[0].reg == inst.operands[2].reg
      || inst.operands[1].reg == inst.operands[2].reg)
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
6494
/* NOP {#hint}.  With an operand, emit the ARMv6K+ hint encoding.  */

static void
do_nop (void)
{
  if (inst.operands[0].present)
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.
	 Keep only the condition field of the original opcode.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000 + inst.operands[0].imm;
    }
}
6505
6506 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6507 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6508 Condition defaults to COND_ALWAYS.
6509 Error if Rd, Rn or Rm are R15. */
6510
static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* The LSL shift amount is optional.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
6520
6521 /* ARM V6 PKHTB (Argument Parse). */
6522
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rm and Rn swap positions.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
6543
6544 /* ARMv5TE: Preload-Cache
6545
6546 PLD <addr_mode>
6547
6548 Syntactically, like LDR with B=1, W=0, L=1. */
6549
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
6563
6564 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI does not use the P bit; clear what addr_mode_2 set.  */
  inst.instruction &= ~PRE_INDEX;
}
6579
/* PUSH/POP: rewrite the operands as LDM/STM with sp! as the base and
   delegate to do_ldmstm.  */

static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}
6590
6591 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6592 word at the specified address and the following word
6593 respectively.
6594 Unconditionally executed.
6595 Error if Rn is R15. */
6596
static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
6604
6605 /* ARM V6 ssat (argument parse). */
6606
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes the saturation position as sat_imm = position - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
6617
6618 /* ARM V6 usat (argument parse). */
6619
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, USAT encodes the saturation position directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
6630
6631 /* ARM V6 ssat16 (argument parse). */
6632
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As for SSAT: sat_imm = position - 1.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
6640
/* ARM V6 usat16 (argument parse).  Position encoded directly, as for
   USAT.  */

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
6648
6649 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6650 preserving the other bits.
6651
6652 setend <endian_specifier>, where <endian_specifier> is either
6653 BE or LE. */
6654
static void
do_setend (void)
{
  /* Operand 0 is non-zero for BE; bit 9 is the E-bit selector.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
6661
/* Shift pseudo-instructions (lsl/lsr/asr/ror): Rd, {Rm,} Rs|#imm.
   When Rm is omitted it defaults to Rd.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate shift count: resolved by a fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
6679
/* SMC #imm: the immediate is encoded via a fixup.  */

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
6686
/* SWI/SVC #imm: the immediate is encoded via a fixup.  */

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
6693
6694 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6695 SMLAxy{cond} Rd,Rm,Rs,Rn
6696 SMLAWy{cond} Rd,Rm,Rs,Rn
6697 Error if any register is R15. */
6698
6699 static void
6700 do_smla (void)
6701 {
6702 inst.instruction |= inst.operands[0].reg << 16;
6703 inst.instruction |= inst.operands[1].reg;
6704 inst.instruction |= inst.operands[2].reg << 8;
6705 inst.instruction |= inst.operands[3].reg << 12;
6706 }
6707
6708 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6709 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6710 Error if any register is R15.
6711 Warning if Rdlo == Rdhi. */
6712
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;  /* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;  /* RdHi.  */
  inst.instruction |= inst.operands[2].reg;	   /* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;   /* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
6724
6725 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6726 SMULxy{cond} Rd,Rm,Rs
6727 Error if any register is R15. */
6728
6729 static void
6730 do_smul (void)
6731 {
6732 inst.instruction |= inst.operands[0].reg << 16;
6733 inst.instruction |= inst.operands[1].reg;
6734 inst.instruction |= inst.operands[2].reg << 8;
6735 }
6736
6737 /* ARM V6 srs (argument parse). */
6738
static void
do_srs (void)
{
  /* Operand 0 is the mode number; writeback sets the W bit.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
6746
6747 /* ARM V6 strex (argument parse). */
6748
static void
do_strex (void)
{
  /* Only the plain [Rn] form with a zero offset is legal.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rm or the base Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset has been fully checked; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
6772
/* ARM V6K strexd: store an even/odd register pair exclusively.
   Operand 3 is the base register.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either stored register or
     the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
6794
6795 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6796 extends it to 32-bits, and adds the result to a value in another
6797 register. You can specify a rotation by 0, 8, 16, or 24 bits
6798 before extracting the 16-bit value.
6799 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6800 Condition defaults to COND_ALWAYS.
6801 Error if any register uses R15. */
6802
6803 static void
6804 do_sxtah (void)
6805 {
6806 inst.instruction |= inst.operands[0].reg << 12;
6807 inst.instruction |= inst.operands[1].reg << 16;
6808 inst.instruction |= inst.operands[2].reg;
6809 inst.instruction |= inst.operands[3].imm << 10;
6810 }
6811
6812 /* ARM V6 SXTH.
6813
6814 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6817
6818 static void
6819 do_sxth (void)
6820 {
6821 inst.instruction |= inst.operands[0].reg << 12;
6822 inst.instruction |= inst.operands[1].reg;
6823 inst.instruction |= inst.operands[2].imm << 10;
6824 }
6825 \f
6826 /* VFP instructions. In a logical order: SP variant first, monad
6827 before dyad, arithmetic then move then load/store. */
6828
/* Single-precision monadic operation: Sd, Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
6835
/* Single-precision dyadic operation: Sd, Sn, Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
6843
/* Single-precision compare against zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
6849
/* Single-to-double conversion: Dd, Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
6856
/* Double-to-single conversion: Sd, Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
6863
/* Move from a VFP single register to an ARM register: Rd, Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
6870
/* Move two consecutive VFP single registers to two ARM registers.
   Operand 2's imm is the register count from the parsed range.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
6880
/* Move from an ARM register to a VFP single register: Sn, Rd.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
6887
/* Move two ARM registers to two consecutive VFP single registers.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
6897
/* Single-precision load/store: Sd, <address>.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
6904
/* Double-precision load/store: Dd, <address>.  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
6911
6912
/* Common encoder for single-precision load/store multiple.
   LDSTM_TYPE selects the addressing variant; non-IA variants require
   base-register writeback.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Operand 1's imm is the register count.  */
  inst.instruction |= inst.operands[1].imm;
}
6925
/* Common encoder for double-precision load/store multiple, including
   the FLDMX/FSTMX ("unknown precision") variants.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each double register counts as two words; the X variants add one
     extra word to mark the format.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
6946
/* Wrapper: single-precision load/store multiple, increment-after.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
6952
/* Wrapper: single-precision load/store multiple, decrement-before.  */

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
6958
/* Wrapper: double-precision load/store multiple, increment-after.  */

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
6964
/* Double-precision load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
6970
/* FLDMX/FSTMX-style multiple, increment-after (extra status word).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
6976
/* FLDMX/FSTMX-style multiple, decrement-before (extra status word).  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
6982
6983 static void
6984 do_vfp_dp_rd_rm (void)
6985 {
6986 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6987 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6988 }
6989
6990 static void
6991 do_vfp_dp_rn_rd (void)
6992 {
6993 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
6994 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6995 }
6996
6997 static void
6998 do_vfp_dp_rd_rn (void)
6999 {
7000 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7001 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7002 }
7003
7004 static void
7005 do_vfp_dp_rd_rn_rm (void)
7006 {
7007 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7008 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7009 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7010 }
7011
/* Single-operand double-precision VFP insn: just the Dd field.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
7017
7018 static void
7019 do_vfp_dp_rm_rd_rn (void)
7020 {
7021 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7022 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7023 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7024 }
7025
7026 /* VFPv3 instructions. */
7027 static void
7028 do_vfp_sp_const (void)
7029 {
7030 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7031 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7032 inst.instruction |= (inst.operands[1].imm >> 4);
7033 }
7034
7035 static void
7036 do_vfp_dp_const (void)
7037 {
7038 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7039 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7040 inst.instruction |= (inst.operands[1].imm >> 4);
7041 }
7042
7043 static void
7044 vfp_conv (int srcsize)
7045 {
7046 unsigned immbits = srcsize - inst.operands[1].imm;
7047 inst.instruction |= (immbits & 1) << 5;
7048 inst.instruction |= (immbits >> 1);
7049 }
7050
/* Single-precision fixed-point conversion, 16-bit source size.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
7057
/* Double-precision fixed-point conversion, 16-bit source size.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
7064
/* Single-precision fixed-point conversion, 32-bit source size.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
7071
/* Double-precision fixed-point conversion, 32-bit source size.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
7078
7079 \f
7080 /* FPA instructions. Also in a logical order. */
7081
7082 static void
7083 do_fpa_cmp (void)
7084 {
7085 inst.instruction |= inst.operands[0].reg << 16;
7086 inst.instruction |= inst.operands[1].reg;
7087 }
7088
/* FPA LFM/SFM: register count 1-4 is encoded in the CP_T_X/CP_T_Y
   bits, and the stack-style mnemonics are emulated by synthesizing
   an immediate offset of 12 bytes per register.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Encode the transfer count (count 4 is encoded as 0/0).  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes on the stack.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending stacks use a negative offset.  */
      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Post-increment with writeback is rewritten as post-indexed
	 addressing for the coprocessor-address encoder.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7127 \f
7128 /* iWMMXt instructions: strictly in alphabetical order. */
7129
/* TANDC/TORC-style insns: the only legal destination is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
7135
7136 static void
7137 do_iwmmxt_textrc (void)
7138 {
7139 inst.instruction |= inst.operands[0].reg << 12;
7140 inst.instruction |= inst.operands[1].imm;
7141 }
7142
7143 static void
7144 do_iwmmxt_textrm (void)
7145 {
7146 inst.instruction |= inst.operands[0].reg << 12;
7147 inst.instruction |= inst.operands[1].reg << 16;
7148 inst.instruction |= inst.operands[2].imm;
7149 }
7150
7151 static void
7152 do_iwmmxt_tinsr (void)
7153 {
7154 inst.instruction |= inst.operands[0].reg << 16;
7155 inst.instruction |= inst.operands[1].reg << 12;
7156 inst.instruction |= inst.operands[2].imm;
7157 }
7158
7159 static void
7160 do_iwmmxt_tmia (void)
7161 {
7162 inst.instruction |= inst.operands[0].reg << 5;
7163 inst.instruction |= inst.operands[1].reg;
7164 inst.instruction |= inst.operands[2].reg << 12;
7165 }
7166
7167 static void
7168 do_iwmmxt_waligni (void)
7169 {
7170 inst.instruction |= inst.operands[0].reg << 12;
7171 inst.instruction |= inst.operands[1].reg << 16;
7172 inst.instruction |= inst.operands[2].reg;
7173 inst.instruction |= inst.operands[3].imm << 20;
7174 }
7175
7176 static void
7177 do_iwmmxt_wmov (void)
7178 {
7179 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7180 inst.instruction |= inst.operands[0].reg << 12;
7181 inst.instruction |= inst.operands[1].reg << 16;
7182 inst.instruction |= inst.operands[1].reg;
7183 }
7184
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword iWMMXt load/store.  The
   encoded offset is scaled by 4, and the reloc chosen depends on
   whether we are assembling ARM or Thumb.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  /* The offset field is stored scaled; pre-multiply here.  */
  inst.reloc.exp.X_add_number *= 4;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
7197
/* WLDRW/WSTRW: word load/store to a wireless register or a control
   register.  The control-register form must be unconditional and is
   marked with the 0xF condition field.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;	/* Unconditional encoding.  */
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
7211
/* WLDRD/WSTRD: doubleword iWMMXt load/store.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, FALSE, 0);
}
7218
7219 static void
7220 do_iwmmxt_wshufh (void)
7221 {
7222 inst.instruction |= inst.operands[0].reg << 12;
7223 inst.instruction |= inst.operands[1].reg << 16;
7224 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7225 inst.instruction |= (inst.operands[2].imm & 0x0f);
7226 }
7227
7228 static void
7229 do_iwmmxt_wzero (void)
7230 {
7231 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7232 inst.instruction |= inst.operands[0].reg;
7233 inst.instruction |= inst.operands[0].reg << 12;
7234 inst.instruction |= inst.operands[0].reg << 16;
7235 }
7236 \f
7237 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7238 operations first, then control, shift, and load/store. */
7239
7240 /* Insns like "foo X,Y,Z". */
7241
7242 static void
7243 do_mav_triple (void)
7244 {
7245 inst.instruction |= inst.operands[0].reg << 16;
7246 inst.instruction |= inst.operands[1].reg;
7247 inst.instruction |= inst.operands[2].reg << 12;
7248 }
7249
7250 /* Insns like "foo W,X,Y,Z".
7251 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7252
7253 static void
7254 do_mav_quad (void)
7255 {
7256 inst.instruction |= inst.operands[0].reg << 5;
7257 inst.instruction |= inst.operands[1].reg << 12;
7258 inst.instruction |= inst.operands[2].reg << 16;
7259 inst.instruction |= inst.operands[3].reg;
7260 }
7261
7262 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit; only the source register is encoded.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
7268
7269 /* Maverick shift immediate instructions.
7270 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7271 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7272
7273 static void
7274 do_mav_shift (void)
7275 {
7276 int imm = inst.operands[2].imm;
7277
7278 inst.instruction |= inst.operands[0].reg << 12;
7279 inst.instruction |= inst.operands[1].reg << 16;
7280
7281 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7282 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7283 Bit 4 should be 0. */
7284 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7285
7286 inst.instruction |= imm;
7287 }
7288 \f
7289 /* XScale instructions. Also sorted arithmetic before move. */
7290
7291 /* Xscale multiply-accumulate (argument parse)
7292 MIAcc acc0,Rm,Rs
7293 MIAPHcc acc0,Rm,Rs
7294 MIAxycc acc0,Rm,Rs. */
7295
7296 static void
7297 do_xsc_mia (void)
7298 {
7299 inst.instruction |= inst.operands[1].reg;
7300 inst.instruction |= inst.operands[2].reg << 12;
7301 }
7302
7303 /* Xscale move-accumulator-register (argument parse)
7304
7305 MARcc acc0,RdLo,RdHi. */
7306
7307 static void
7308 do_xsc_mar (void)
7309 {
7310 inst.instruction |= inst.operands[1].reg << 12;
7311 inst.instruction |= inst.operands[2].reg << 16;
7312 }
7313
7314 /* Xscale move-register-accumulator (argument parse)
7315
7316 MRAcc RdLo,RdHi,acc0. */
7317
7318 static void
7319 do_xsc_mra (void)
7320 {
7321 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7322 inst.instruction |= inst.operands[0].reg << 12;
7323 inst.instruction |= inst.operands[1].reg << 16;
7324 }
7325 \f
7326 /* Encoding functions relevant only to Thumb. */
7327
7328 /* inst.operands[i] is a shifted-register operand; encode
7329 it into inst.instruction in the format used by Thumb32. */
7330
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb-2 has no register-specified shifts in this operand
     position.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* ASR/LSR allow a count of 32 (encoded as 0); LSL and ROR do
	 not.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift amount is split: bits 4-2 -> imm3 (insn bits 12-14),
	 bits 1-0 -> imm2 (insn bits 6-7).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
7362
7363
7364 /* inst.operands[i] was set up by parse_address. Encode it into a
7365 Thumb32 format load or store instruction. Reject forms that cannot
7366 be used with such instructions. If is_t is true, reject forms that
7367 cannot be used with a T instruction; if is_d is true, reject forms
7368 that cannot be used with a D instruction. */
7369
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #0-3}].  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0-3 is permitted.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No fixup needed; everything is encoded already.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate-offset form, optionally pre-indexed with
	 writeback.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  /* Doubleword loads/stores use a different bit layout for
	     the P/W flags.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
7440
7441 /* Table of Thumb instructions which exist in both 16- and 32-bit
7442 encodings (the latter only in post-V6T2 cores). The index is the
7443 value used in the insns table below. When there is more than one
7444 possible 16-bit encoding for the instruction, this table always
7445 holds variant (1).
7446 Also contains several pseudo-instructions used during relaxation. */
/* Map from T_MNEM_* codes to the narrow (variant 1) and wide
   encodings of each dual-width Thumb instruction.  An all-ones wide
   entry means no 32-bit form exists.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* Was f3af9004 — typo; SEV.W is f3af8004.  */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
7544
7545 /* Thumb instruction encoders, in alphabetical order. */
7546
7547 /* ADDW or SUBW. */
/* ADDW or SUBW: Rn in bits 16-19, Rd in bits 8-11, 12-bit immediate
   filled in by the fixup.  */
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
7560
7561 /* Parse an add or subtract instruction. We get here with inst.instruction
7562 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7563
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Inside an IT block only the non-flag-setting forms may be
	 narrow; outside one only the flag-setting forms may be.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      int add;

	      add = (inst.instruction == T_MNEM_add
		     || inst.instruction == T_MNEM_adds);
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Allow relaxation to the 32-bit form unless the
		     user demanded 16 bits.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* ??? Convert large immediates to addw/subw.  */
	      /* Force the i-variant (modified immediate) encoding.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= inst.operands[0].reg << 8;
	      /* NOTE(review): this uses operands[1].reg rather than
		 the Rs fallback computed above — confirm operand 1 is
		 always populated when the Rs form is omitted.  */
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  /* Non-flag-setting high-register ADD.  */
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit encodings are available.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
7726
/* ADR: prefer the relaxable 16-bit form when the destination is a
   low register and no size was forced; otherwise pick 32- or 16-bit
   per the size requirement.  */
static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}
7756
7757 /* Arithmetic instructions for which there is just one 16-bit
7758 instruction encoding, and it allows only two low registers.
7759 For maximal compatibility with ARM syntax, we allow three register
7760 operands even when Thumb-32 instructions are not available, as long
7761 as the first two are identical. For instance, both "sbc r0,r1" and
7762 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch to the modified-immediate form of the opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  /* The 16-bit form needs low registers, no shift, and no
	     forced 32-bit size.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
7840
7841 /* Similarly, but for instructions where the arithmetic operation is
7842 commutative, so we can allow either of them to be different from
7843 the destination operand in a 16-bit instruction. For instance, all
7844 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7845 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch to the modified-immediate form of the opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 coincide with the destination.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
7936
7937 static void
7938 do_t_barrier (void)
7939 {
7940 if (inst.operands[0].present)
7941 {
7942 constraint ((inst.instruction & 0xf0) != 0x40
7943 && inst.operands[0].imm != 0xf,
7944 "bad barrier type");
7945 inst.instruction |= inst.operands[0].imm;
7946 }
7947 else
7948 inst.instruction |= 0xf;
7949 }
7950
static void
do_t_bfc (void)
{
  /* Operands are lsb and width; the field must fit in 32 bits.  */
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  /* LSB is split: bits 4-2 -> imm3, bits 1-0 -> imm2.  */
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
7963
static void
do_t_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  /* Operands 2 and 3 are lsb and width.  */
  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  /* LSB is split: bits 4-2 -> imm3, bits 1-0 -> imm2.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
7984
/* SBFX/UBFX: Rd, Rn, lsb, width — encoded as lsb plus width-1.  */
static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  /* LSB is split: bits 4-2 -> imm3, bits 1-0 -> imm2.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}
7996
7997 /* ARM V5 Thumb BLX (argument parse)
7998 BLX <target_addr> which is BLX(1)
7999 BLX <Rm> which is BLX(2)
8000 Unfortunately, there are two different opcodes for this mnemonic.
8001 So, the insns[].value is not used, and the code here zaps values
8002 into inst.instruction.
8003
8004 ??? How to take advantage of the additional two bits of displacement
8005 available in Thumb32 mode? Need new relocation? */
8006
static void
do_t_blx (void)
{
  /* A BLX must be the last instruction of an IT block, if inside one.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      /* EABI v4 and later treat the target as a Thumb symbol and use
	 the plain branch reloc.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}
8027
/* Thumb B/Bcc: choose between the 16-bit and 32-bit encodings and
   attach the matching PC-relative relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      /* Explicit .w: the wide encoding, no relaxation.  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* 0xF would collide with the SVC/other encoding space.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* Start from the 16-bit encoding.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
8079
8080 static void
8081 do_t_bkpt (void)
8082 {
8083 constraint (inst.cond != COND_ALWAYS,
8084 _("instruction is always unconditional"));
8085 if (inst.operands[0].present)
8086 {
8087 constraint (inst.operands[0].imm > 255,
8088 _("immediate value out of range"));
8089 inst.instruction |= inst.operands[0].imm;
8090 }
8091 }
8092
/* Thumb BL/BLX with a 23-bit (sign-extended) branch offset.  */
static void
do_t_branch23 (void)
{
  /* Legal only outside an IT block, or as its last instruction.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
8111
/* Thumb BX Rm: Rm occupies bits 6-3.  */
static void
do_t_bx (void)
{
  /* Legal only outside an IT block, or as its last instruction.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
8121
8122 static void
8123 do_t_bxj (void)
8124 {
8125 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8126 if (inst.operands[0].reg == REG_PC)
8127 as_tsktsk (_("use of r15 in bxj is not really useful"));
8128
8129 inst.instruction |= inst.operands[0].reg << 16;
8130 }
8131
8132 static void
8133 do_t_clz (void)
8134 {
8135 inst.instruction |= inst.operands[0].reg << 8;
8136 inst.instruction |= inst.operands[1].reg << 16;
8137 inst.instruction |= inst.operands[1].reg;
8138 }
8139
/* Thumb CPS: the pre-parsed flag bits go straight into the opcode.  */
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}
8146
/* Thumb CPSIE/CPSID, optionally with a mode number (the 2-argument
   form, which needs the 32-bit encoding).  */
static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit encoding: rebuild the opcode from scratch, carrying the
	 imod field (bits 5-4 of the 16-bit form) over into bits 10-9.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): arm_ext_v1 is satisfied by every supported CPU,
	 so this constraint can never fire; a v6 feature test may have
	 been intended — confirm against the opcode table.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
8174
8175 /* THUMB CPY instruction (argument parse). */
8176
8177 static void
8178 do_t_cpy (void)
8179 {
8180 if (inst.size_req == 4)
8181 {
8182 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8183 inst.instruction |= inst.operands[0].reg << 8;
8184 inst.instruction |= inst.operands[1].reg;
8185 }
8186 else
8187 {
8188 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8189 inst.instruction |= (inst.operands[0].reg & 0x7);
8190 inst.instruction |= inst.operands[1].reg << 3;
8191 }
8192 }
8193
8194 static void
8195 do_t_czb (void)
8196 {
8197 constraint (current_it_mask, BAD_NOT_IT);
8198 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8199 inst.instruction |= inst.operands[0].reg;
8200 inst.reloc.pc_rel = 1;
8201 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8202 }
8203
/* Thumb-2 DBG #option: the hint option goes in the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8209
8210 static void
8211 do_t_div (void)
8212 {
8213 if (!inst.operands[1].present)
8214 inst.operands[1].reg = inst.operands[0].reg;
8215 inst.instruction |= inst.operands[0].reg << 8;
8216 inst.instruction |= inst.operands[1].reg << 16;
8217 inst.instruction |= inst.operands[2].reg;
8218 }
8219
8220 static void
8221 do_t_hint (void)
8222 {
8223 if (unified_syntax && inst.size_req == 4)
8224 inst.instruction = THUMB_OP32 (inst.instruction);
8225 else
8226 inst.instruction = THUMB_OP16 (inst.instruction);
8227 }
8228
/* Thumb-2 IT (If-Then) instruction.  Records the block state so that
   following instructions can be validated against it.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  /* IT blocks may not nest.  */
  constraint (current_it_mask, BAD_NOT_IT);
  /* Bit 4 acts as a sentinel so the mask shifts down to 0x10 exactly
     at the block's last instruction.  */
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.
     The parser built the mask relative to the base condition; for an
     even (negated) condition every then/else selector bit must flip,
     leaving the terminating bit alone.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
8258
/* Thumb LDM/STM: choose between the 16-bit and 32-bit encodings and
   diagnose the UNPREDICTABLE register-list combinations.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction.  The narrow form needs a
	 low base register, a low-register-only list, and writeback
	 semantics that match: stmia always writes back, ldmia writes
	 back exactly when the base is not in the list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && inst.operands[0].reg <= 7
	  && !(inst.operands[1].imm & ~0xff)
	  && (inst.instruction == T_MNEM_stmia
	      ? inst.operands[0].writeback
	      : (inst.operands[0].writeback
		 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
	{
	  /* Storing a base that is in the list, but not the lowest
	     listed register, stores an UNPREDICTABLE value.  */
	  if (inst.instruction == T_MNEM_stmia
	      && (inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);

	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].imm;
	}
      else
	{
	  /* 32-bit form: warn about the list combinations the
	     architecture forbids or leaves UNPREDICTABLE.  */
	  if (inst.operands[1].imm & (1 << 13))
	    as_warn (_("SP should not be in register list"));
	  if (inst.instruction == T_MNEM_stmia)
	    {
	      if (inst.operands[1].imm & (1 << 15))
		as_warn (_("PC should not be in register list"));
	      if (inst.operands[1].imm & (1 << inst.operands[0].reg))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);
	    }
	  else
	    {
	      if (inst.operands[1].imm & (1 << 14)
		  && inst.operands[1].imm & (1 << 15))
		as_warn (_("LR and PC should not both be in register list"));
	      if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
		  && inst.operands[0].writeback)
		as_warn (_("base register should not be in register list "
			   "when written back"));
	    }
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 16;
	  inst.instruction |= inst.operands[1].imm;
	  if (inst.operands[0].writeback)
	    inst.instruction |= WRITE_BACK;
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit encodings exist, with
	 writeback implied by the architecture rather than chosen.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
8348
8349 static void
8350 do_t_ldrex (void)
8351 {
8352 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8353 || inst.operands[1].postind || inst.operands[1].writeback
8354 || inst.operands[1].immisreg || inst.operands[1].shifted
8355 || inst.operands[1].negative,
8356 BAD_ADDR_MODE);
8357
8358 inst.instruction |= inst.operands[0].reg << 12;
8359 inst.instruction |= inst.operands[1].reg << 16;
8360 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8361 }
8362
8363 static void
8364 do_t_ldrexd (void)
8365 {
8366 if (!inst.operands[1].present)
8367 {
8368 constraint (inst.operands[0].reg == REG_LR,
8369 _("r14 not allowed as first register "
8370 "when second register is omitted"));
8371 inst.operands[1].reg = inst.operands[0].reg + 1;
8372 }
8373 constraint (inst.operands[0].reg == inst.operands[1].reg,
8374 BAD_OVERLAP);
8375
8376 inst.instruction |= inst.operands[0].reg << 12;
8377 inst.instruction |= inst.operands[1].reg << 8;
8378 inst.instruction |= inst.operands[2].reg << 16;
8379 }
8380
/* Thumb single-register load/store (LDR/STR and the byte, halfword and
   signed variants).  Selects among the many 16-bit encodings and the
   Thumb-2 32-bit encoding.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* =imm / label operand: may become a mov or a literal-pool
	     load.  The 32-bit opcode is installed first, presumably so
	     move_or_literal_pool can tell which encoding is in use —
	     confirm against that helper.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Ri] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes
		     with an 8-bit Rd field.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit .n: leave the choice to relaxation.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  /* Pre-unified syntax: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: word-sized loads/stores with dedicated
	 opcodes; only an immediate offset is possible.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map each immediate-offset opcode to its register-offset
     counterpart; ldrsb/ldrsh only exist in register-offset form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
8535
8536 static void
8537 do_t_ldstd (void)
8538 {
8539 if (!inst.operands[1].present)
8540 {
8541 inst.operands[1].reg = inst.operands[0].reg + 1;
8542 constraint (inst.operands[0].reg == REG_LR,
8543 _("r14 not allowed here"));
8544 }
8545 inst.instruction |= inst.operands[0].reg << 12;
8546 inst.instruction |= inst.operands[1].reg << 8;
8547 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8548
8549 }
8550
/* Thumb-2 unprivileged load/store (LDRT/STRT etc.): Rt in bits 15-12,
   then the translated address form.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
8557
8558 static void
8559 do_t_mla (void)
8560 {
8561 inst.instruction |= inst.operands[0].reg << 8;
8562 inst.instruction |= inst.operands[1].reg << 16;
8563 inst.instruction |= inst.operands[2].reg;
8564 inst.instruction |= inst.operands[3].reg << 12;
8565 }
8566
8567 static void
8568 do_t_mlal (void)
8569 {
8570 inst.instruction |= inst.operands[0].reg << 12;
8571 inst.instruction |= inst.operands[1].reg << 8;
8572 inst.instruction |= inst.operands[2].reg << 16;
8573 inst.instruction |= inst.operands[3].reg;
8574 }
8575
/* Thumb MOV/MOVS/CMP: pick among the several 16-bit encodings and the
   32-bit Thumb-2 encodings.  */
static void
do_t_mov_cmp (void)
{
  if (unified_syntax)
    {
      /* mov/movs put Rd in bits 11-8; cmp uses bits 19-16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      /* The narrow movs sets flags, so it is only usable outside an IT
	 block; conversely narrow mov/cmp are always candidates.  */
      if (current_it_mask)
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (current_it_mask == 0 && opcode == T_MNEM_mov)
	    /* Narrow mov-immediate would set flags; not valid here.  */
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= inst.operands[0].reg << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit form with a modified immediate.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= inst.operands[0].reg << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << r0off;
	  encode_thumb32_shifted_operand (1);
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* High-register mov: Rd split across bit 7 and bits 2-0.  */
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
	    inst.instruction |= (inst.operands[0].reg & 0x7);
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= inst.operands[0].reg;
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= inst.operands[0].reg;
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
		inst.instruction |= (inst.operands[0].reg & 0x7);
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);
  if (inst.operands[1].isreg)
    {
      if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  /* do_t_cpy handles the split high-register encoding.  */
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
8694
8695 static void
8696 do_t_mov16 (void)
8697 {
8698 inst.instruction |= inst.operands[0].reg << 8;
8699 inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
8700 inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
8701 inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
8702 inst.instruction |= (inst.operands[1].imm & 0x00ff);
8703 }
8704
/* Thumb MVN/MVNS/TST/CMN/TEQ: choose narrow or wide encoding.  */
static void
do_t_mvn_tst (void)
{
  if (unified_syntax)
    {
      /* mvn/mvns put Rd in bits 11-8; the compare/test forms use
	 bits 19-16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	/* A flag-setter can be narrow only outside an IT block,
	   a non-flag-setter only inside one.  */
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= inst.operands[0].reg << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: unshifted low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
8771
8772 static void
8773 do_t_mrs (void)
8774 {
8775 int flags;
8776 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8777 if (flags == 0)
8778 {
8779 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8780 _("selected processor does not support "
8781 "requested special purpose register"));
8782 }
8783 else
8784 {
8785 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8786 _("selected processor does not support "
8787 "requested special purpose register %x"));
8788 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8789 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8790 _("'CPSR' or 'SPSR' expected"));
8791 }
8792
8793 inst.instruction |= inst.operands[0].reg << 8;
8794 inst.instruction |= (flags & SPSR_BIT) >> 2;
8795 inst.instruction |= inst.operands[1].imm & 0xff;
8796 }
8797
/* Thumb-2 MSR psr, Rn (register form only).  */
static void
do_t_msr (void)
{
  int flags;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* PSR field flags present: the classic CPSR/SPSR form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* Bare SYSm value: an M-profile special purpose register; the
	 f-field bit is implied.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }
  /* SPSR selector in bit 20, field mask in bits 11-8, SYSm in
     bits 7-0, Rn in bits 19-16.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
}
8824
/* Thumb MUL/MULS.  */
static void
do_t_mul (void)
{
  /* Two-operand form: destination doubles as first source.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL.  */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
		  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      /* The 16-bit MUL is destructive: Rd must equal one of the two
	 sources, and the other source goes in bits 5-3.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      if (inst.operands[0].reg == inst.operands[1].reg)
	inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
	inst.instruction |= inst.operands[1].reg << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
8857
8858 static void
8859 do_t_mull (void)
8860 {
8861 inst.instruction |= inst.operands[0].reg << 12;
8862 inst.instruction |= inst.operands[1].reg << 8;
8863 inst.instruction |= inst.operands[2].reg << 16;
8864 inst.instruction |= inst.operands[3].reg;
8865
8866 if (inst.operands[0].reg == inst.operands[1].reg)
8867 as_tsktsk (_("rdhi and rdlo must be different"));
8868 }
8869
8870 static void
8871 do_t_nop (void)
8872 {
8873 if (unified_syntax)
8874 {
8875 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8876 {
8877 inst.instruction = THUMB_OP32 (inst.instruction);
8878 inst.instruction |= inst.operands[0].imm;
8879 }
8880 else
8881 {
8882 inst.instruction = THUMB_OP16 (inst.instruction);
8883 inst.instruction |= inst.operands[0].imm << 4;
8884 }
8885 }
8886 else
8887 {
8888 constraint (inst.operands[0].present,
8889 _("Thumb does not support NOP with hints"));
8890 inst.instruction = 0x46c0;
8891 }
8892 }
8893
8894 static void
8895 do_t_neg (void)
8896 {
8897 if (unified_syntax)
8898 {
8899 bfd_boolean narrow;
8900
8901 if (THUMB_SETS_FLAGS (inst.instruction))
8902 narrow = (current_it_mask == 0);
8903 else
8904 narrow = (current_it_mask != 0);
8905 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8906 narrow = FALSE;
8907 if (inst.size_req == 4)
8908 narrow = FALSE;
8909
8910 if (!narrow)
8911 {
8912 inst.instruction = THUMB_OP32 (inst.instruction);
8913 inst.instruction |= inst.operands[0].reg << 8;
8914 inst.instruction |= inst.operands[1].reg << 16;
8915 }
8916 else
8917 {
8918 inst.instruction = THUMB_OP16 (inst.instruction);
8919 inst.instruction |= inst.operands[0].reg;
8920 inst.instruction |= inst.operands[1].reg << 3;
8921 }
8922 }
8923 else
8924 {
8925 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8926 BAD_HIREG);
8927 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8928
8929 inst.instruction = THUMB_OP16 (inst.instruction);
8930 inst.instruction |= inst.operands[0].reg;
8931 inst.instruction |= inst.operands[1].reg << 3;
8932 }
8933 }
8934
8935 static void
8936 do_t_pkhbt (void)
8937 {
8938 inst.instruction |= inst.operands[0].reg << 8;
8939 inst.instruction |= inst.operands[1].reg << 16;
8940 inst.instruction |= inst.operands[2].reg;
8941 if (inst.operands[3].present)
8942 {
8943 unsigned int val = inst.reloc.exp.X_add_number;
8944 constraint (inst.reloc.exp.X_op != O_constant,
8945 _("expression too complex"));
8946 inst.instruction |= (val & 0x1c) << 10;
8947 inst.instruction |= (val & 0x03) << 6;
8948 }
8949 }
8950
8951 static void
8952 do_t_pkhtb (void)
8953 {
8954 if (!inst.operands[3].present)
8955 inst.instruction &= ~0x00000020;
8956 do_t_pkhbt ();
8957 }
8958
/* Thumb-2 PLD: operand 0 is the address; there is no destination
   register.  */
static void
do_t_pld (void)
{
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
8964
/* Thumb PUSH/POP: select among the 16-bit form, the 32-bit LDM/STM
   form, and the single-register LDR/STR form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* 16-bit encoding with the extra LR (push) / PC (pop) bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      mask &= 0xff;
    }
  else if (unified_syntax)
    {
      /* Diagnose list combinations the architecture forbids.  */
      if (mask & (1 << 13))
	inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
	{
	  if (mask & (1 << 15))
	    inst.error = _("PC not allowed in register list");
	}
      else
	{
	  if (mask & (1 << 14)
	      && mask & (1 << 15))
	    inst.error = _("LR and PC should not both be in register list");
	}
      if ((mask & (mask - 1)) == 0)
	{
	  /* Single register push/pop implemented as str/ldr.  */
	  if (inst.instruction == T_MNEM_push)
	    inst.instruction = 0xf84d0d04;	/* str reg, [sp, #-4]! */
	  else
	    inst.instruction = 0xf85d0b04;	/* ldr reg, [sp], #4 */
	  /* Convert the one-bit mask into the register number, placed
	     in the Rt field (bits 15-12).  */
	  mask = ffs(mask) - 1;
	  mask <<= 12;
	}
      else
	inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }

  inst.instruction |= mask;
}
9023
9024 static void
9025 do_t_rbit (void)
9026 {
9027 inst.instruction |= inst.operands[0].reg << 8;
9028 inst.instruction |= inst.operands[1].reg << 16;
9029 }
9030
9031 static void
9032 do_t_rev (void)
9033 {
9034 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9035 && inst.size_req != 4)
9036 {
9037 inst.instruction = THUMB_OP16 (inst.instruction);
9038 inst.instruction |= inst.operands[0].reg;
9039 inst.instruction |= inst.operands[1].reg << 3;
9040 }
9041 else if (unified_syntax)
9042 {
9043 inst.instruction = THUMB_OP32 (inst.instruction);
9044 inst.instruction |= inst.operands[0].reg << 8;
9045 inst.instruction |= inst.operands[1].reg << 16;
9046 inst.instruction |= inst.operands[1].reg;
9047 }
9048 else
9049 inst.error = BAD_HIREG;
9050 }
9051
/* Thumb-2 RSB Rd, Rs, operand2.  */
static void
do_t_rsb (void)
{
  int Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      /* Immediate: switch to the modified-immediate encoding and let
	 the fixup encode the value.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    encode_thumb32_shifted_operand (2);
}
9072
/* Thumb SETEND: bit 3 selects big-endian.  */
static void
do_t_setend (void)
{
  /* SETEND is not permitted inside an IT block.  */
  constraint (current_it_mask, BAD_NOT_IT);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
9080
/* Thumb shift instructions (ASR, LSL, LSR, ROR and their flag-setting
   variants).  A missing first source register defaults to the
   destination: "lsls Rd, #imm" == "lsls Rd, Rd, #imm".  In unified
   syntax a 16-bit encoding is selected when operands and IT context
   permit, otherwise a 32-bit encoding; in pre-UAL syntax only the
   16-bit encodings are available.  */

static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The narrow encodings set the flags outside an IT block and leave
	 them alone inside one, so they are only usable when that matches
	 the requested flag behaviour.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      /* Narrow encodings can only address r0-r7.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate-ROR encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form is Rd := Rd shift Rs, Rs low.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Immediate shifts are encoded as MOV(S) Rd, Rm, shift #n.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit, low-register encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
9212
9213 static void
9214 do_t_simd (void)
9215 {
9216 inst.instruction |= inst.operands[0].reg << 8;
9217 inst.instruction |= inst.operands[1].reg << 16;
9218 inst.instruction |= inst.operands[2].reg;
9219 }
9220
9221 static void
9222 do_t_smc (void)
9223 {
9224 unsigned int value = inst.reloc.exp.X_add_number;
9225 constraint (inst.reloc.exp.X_op != O_constant,
9226 _("expression too complex"));
9227 inst.reloc.type = BFD_RELOC_UNUSED;
9228 inst.instruction |= (value & 0xf000) >> 12;
9229 inst.instruction |= (value & 0x0ff0);
9230 inst.instruction |= (value & 0x000f) << 16;
9231 }
9232
/* Thumb-2 SSAT.  Rd in bits 8-11, Rn in bits 16-19; the saturation
   position is encoded as (imm - 1).  An optional LSL/ASR shift of the
   source is folded into the sh bit and the split immediate field.  */

static void
do_t_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;	/* SSAT encodes position-1.  */
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit */
	  /* Shift amount is split: bits 4:2 go to 14:12, bits 1:0 to 7:6.  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      /* Immediate fully consumed; no fixup wanted.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
9255
9256 static void
9257 do_t_ssat16 (void)
9258 {
9259 inst.instruction |= inst.operands[0].reg << 8;
9260 inst.instruction |= inst.operands[1].imm - 1;
9261 inst.instruction |= inst.operands[2].reg << 16;
9262 }
9263
9264 static void
9265 do_t_strex (void)
9266 {
9267 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9268 || inst.operands[2].postind || inst.operands[2].writeback
9269 || inst.operands[2].immisreg || inst.operands[2].shifted
9270 || inst.operands[2].negative,
9271 BAD_ADDR_MODE);
9272
9273 inst.instruction |= inst.operands[0].reg << 8;
9274 inst.instruction |= inst.operands[1].reg << 12;
9275 inst.instruction |= inst.operands[2].reg << 16;
9276 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9277 }
9278
9279 static void
9280 do_t_strexd (void)
9281 {
9282 if (!inst.operands[2].present)
9283 inst.operands[2].reg = inst.operands[1].reg + 1;
9284
9285 constraint (inst.operands[0].reg == inst.operands[1].reg
9286 || inst.operands[0].reg == inst.operands[2].reg
9287 || inst.operands[0].reg == inst.operands[3].reg
9288 || inst.operands[1].reg == inst.operands[2].reg,
9289 BAD_OVERLAP);
9290
9291 inst.instruction |= inst.operands[0].reg;
9292 inst.instruction |= inst.operands[1].reg << 12;
9293 inst.instruction |= inst.operands[2].reg << 8;
9294 inst.instruction |= inst.operands[3].reg << 16;
9295 }
9296
9297 static void
9298 do_t_sxtah (void)
9299 {
9300 inst.instruction |= inst.operands[0].reg << 8;
9301 inst.instruction |= inst.operands[1].reg << 16;
9302 inst.instruction |= inst.operands[2].reg;
9303 inst.instruction |= inst.operands[3].imm << 4;
9304 }
9305
/* Thumb SXTH/SXTB (and unsigned variants, selected by the opcode
   already in inst.instruction).  The 16-bit form is usable only with
   low registers and no rotation; otherwise the 32-bit form is used in
   unified syntax, and an error is raised in pre-UAL syntax.  */

static void
do_t_sxth (void)
{
  /* inst.instruction <= 0xffff means we still hold the 16-bit opcode.  */
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;	/* rotation */
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
9332
/* Thumb SWI/SVC.  The comment field is filled in later via the
   BFD_RELOC_ARM_SWI fixup.  */

static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
9338
/* Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode distinguishes
   the halfword form (TBH), which requires an LSL #1 index shift.  A
   table branch may only appear last in an IT block.  */

static void
do_t_tb (void)
{
  int half;

  half = (inst.instruction & 0x10) != 0;	/* H bit: TBH vs TBB.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  /* Base register in bits 16-19, index register in bits 0-3.  */
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}
9354
/* Thumb-2 USAT.  Like do_t_ssat, but USAT encodes the saturation
   position directly (not minus one).  */

static void
do_t_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit */

	  /* Shift amount is split: bits 4:2 go to 14:12, bits 1:0 to 7:6.  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      /* Immediate fully consumed; no fixup wanted.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
9377
9378 static void
9379 do_t_usat16 (void)
9380 {
9381 inst.instruction |= inst.operands[0].reg << 8;
9382 inst.instruction |= inst.operands[1].imm;
9383 inst.instruction |= inst.operands[2].reg << 16;
9384 }
9385
9386 /* Neon instruction encoder helpers. */
9387
9388 /* Encodings for the different types for various Neon opcodes. */
9389
/* An "invalid" code for the following tables. */
#define N_INV -1u

/* One row of NEON_ENC_TAB: three alternative base encodings for a
   single overloaded mnemonic.  Any entry may be N_INV if that form of
   the instruction does not exist.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer-operand (or "first") form.  */
  unsigned float_or_poly;	/* Float or polynomial form.  */
  unsigned scalar_or_imm;	/* Scalar or immediate form.  */
};
9399
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry lists up to three base encodings for one mnemonic — broadly:
   integer form, float-or-polynomial form, scalar-or-immediate form —
   with N_INV marking a form that does not exist.  For the element
   load/store entries (vld1..vst4) the columns are instead the
   interleave, lane and all-lanes variants; the NEON_ENC_* accessor
   macros select the right column per use.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed. */  	\
  X(vclt,	0x0000310, 0x1000e00, 0x1b10200),	\
  X(vcle,	0x0000300, 0x1200e00, 0x1b10180),	\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV)
9451
/* Symbolic names for the overloaded Neon opcodes, N_MNEM_vabd etc.,
   usable as indices into neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The actual encoding table, one neon_tab_entry per overloaded opcode,
   in the same order as enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
9465
/* Accessors for neon_enc_tab, one per logical use of each column.  The
   argument is the N_MNEM_ value held in inst.instruction; the
   0x0fffffff mask strips the high tag bits before indexing (NOTE
   (review): presumably these mark the value as a Neon pseudo-opcode —
   confirm against the opcode table setup).  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9475
/* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
   shapes which an instruction can accept. The following mnemonic characters
   are used in the tag names for this enumeration:

     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   A name with two parts separated by '_' (e.g. NS_DDD_QQQ) is a
   polymorphic shape, resolved to one of its specific alternatives by
   neon_check_shape.  */

enum neon_shape
{
  NS_DDD_QQQ,
  NS_DDD,
  NS_QQQ,
  NS_DDI_QQI,
  NS_DDI,
  NS_QQI,
  NS_DDS_QQS,
  NS_DDS,
  NS_QQS,
  NS_DD_QQ,
  NS_DD,
  NS_QQ,
  NS_DS_QS,
  NS_DS,
  NS_QS,
  NS_DR_QR,
  NS_DR,
  NS_QR,
  NS_DI_QI,
  NS_DI,
  NS_QI,
  NS_DLD,
  NS_DQ,
  NS_QD,
  NS_DQI,
  NS_QDI,
  NS_QDD,
  NS_QDS,
  NS_QQD,
  NS_DQQ,
  NS_DDDI_QQQI,
  NS_DDDI,
  NS_QQQI,
  NS_IGNORE
};
9525
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint. */

enum neon_type_mask
{
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_KEY  = 0x080000, /* key element (main type specifier). */
  N_EQK  = 0x100000, /* given operand has the same type & size as the key. */
  /* The modifier values below deliberately reuse the low type-bit
     values; they are only interpreted in an operand that also has
     N_EQK set, so the two meanings never collide.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size. */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size. */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed. */
  N_UNS  = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
  N_INT  = 0x000010, /* if N_EQK, this operand is forced to be integer. */
  N_FLT  = 0x000020, /* if N_EQK, this operand is forced to be float. */
  N_SIZ  = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F32
};
9565
/* All N_EQK modifier bits together, for masking them out.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Commonly-used groups of element types accepted by instructions.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether. */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
9578
/* Check the shape of a Neon instruction (sizes of registers). Returns the more
   specific shape when there are two alternatives. For non-polymorphic shapes,
   checking is done during operand parsing, so is not implemented here. */

static enum neon_shape
neon_check_shape (enum neon_shape req)
{
  /* Operand classification helpers: any register, D register,
     Q register, immediate and scalar respectively.  */
#define RR(X) (inst.operands[(X)].isreg)
#define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
#define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
#define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
#define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future. */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  switch (req)
    {
    case NS_DDD_QQQ:
      {
	if (RD(0) && RD(1) && RD(2))
	  return NS_DDD;
	else if (RQ(0) && RQ(1) && RQ(2))
	  return NS_QQQ;
	else
	  first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
			 "operands"));
      }
      break;

    case NS_DDI_QQI:
      {
	if (RD(0) && RD(1) && IM(2))
	  return NS_DDI;
	else if (RQ(0) && RQ(1) && IM(2))
	  return NS_QQI;
	else
	  first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
			 "operands"));
      }
      break;

    case NS_DDDI_QQQI:
      {
	if (RD(0) && RD(1) && RD(2) && IM(3))
	  return NS_DDDI;
	if (RQ(0) && RQ(1) && RQ(2) && IM(3))
	  return NS_QQQI;
	else
	  first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
			 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
      }
      break;

    case NS_DDS_QQS:
      {
	if (RD(0) && RD(1) && SC(2))
	  return NS_DDS;
	else if (RQ(0) && RQ(1) && SC(2))
	  return NS_QQS;
	else
	  first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
			 "operands"));
      }
      break;

    case NS_DD_QQ:
      {
	if (RD(0) && RD(1))
	  return NS_DD;
	else if (RQ(0) && RQ(1))
	  return NS_QQ;
	else
	  first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
      }
      break;

    case NS_DS_QS:
      {
	if (RD(0) && SC(1))
	  return NS_DS;
	else if (RQ(0) && SC(1))
	  return NS_QS;
	else
	  first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
      }
      break;

    case NS_DR_QR:
      {
	if (RD(0) && RR(1))
	  return NS_DR;
	else if (RQ(0) && RR(1))
	  return NS_QR;
	else
	  first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
      }
      break;

    case NS_DI_QI:
      {
	if (RD(0) && IM(1))
	  return NS_DI;
	else if (RQ(0) && IM(1))
	  return NS_QI;
	else
	  first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
      }
      break;

    default:
      abort ();
    }

  /* Shape mismatch: an error has already been recorded via first_error;
     hand the polymorphic shape back unchanged.  */
  return req;
#undef RR
#undef RD
#undef RQ
#undef IM
#undef SC
}
9704
9705 static void
9706 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9707 unsigned *g_size)
9708 {
9709 /* Allow modification to be made to types which are constrained to be
9710 based on the key element, based on bits set alongside N_EQK. */
9711 if ((typebits & N_EQK) != 0)
9712 {
9713 if ((typebits & N_HLF) != 0)
9714 *g_size /= 2;
9715 else if ((typebits & N_DBL) != 0)
9716 *g_size *= 2;
9717 if ((typebits & N_SGN) != 0)
9718 *g_type = NT_signed;
9719 else if ((typebits & N_UNS) != 0)
9720 *g_type = NT_unsigned;
9721 else if ((typebits & N_INT) != 0)
9722 *g_type = NT_integer;
9723 else if ((typebits & N_FLT) != 0)
9724 *g_type = NT_float;
9725 else if ((typebits & N_SIZ) != 0)
9726 *g_type = NT_untyped;
9727 }
9728 }
9729
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given. */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for operands tied to the key type.  */
  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
9745
9746 /* Convert Neon type and size into compact bitmask representation. */
9747
9748 static enum neon_type_mask
9749 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9750 {
9751 switch (type)
9752 {
9753 case NT_untyped:
9754 switch (size)
9755 {
9756 case 8: return N_8;
9757 case 16: return N_16;
9758 case 32: return N_32;
9759 case 64: return N_64;
9760 default: ;
9761 }
9762 break;
9763
9764 case NT_integer:
9765 switch (size)
9766 {
9767 case 8: return N_I8;
9768 case 16: return N_I16;
9769 case 32: return N_I32;
9770 case 64: return N_I64;
9771 default: ;
9772 }
9773 break;
9774
9775 case NT_float:
9776 if (size == 32)
9777 return N_F32;
9778 break;
9779
9780 case NT_poly:
9781 switch (size)
9782 {
9783 case 8: return N_P8;
9784 case 16: return N_P16;
9785 default: ;
9786 }
9787 break;
9788
9789 case NT_signed:
9790 switch (size)
9791 {
9792 case 8: return N_S8;
9793 case 16: return N_S16;
9794 case 32: return N_S32;
9795 case 64: return N_S64;
9796 default: ;
9797 }
9798 break;
9799
9800 case NT_unsigned:
9801 switch (size)
9802 {
9803 case 8: return N_U8;
9804 case 16: return N_U16;
9805 case 32: return N_U32;
9806 case 64: return N_U64;
9807 default: ;
9808 }
9809 break;
9810
9811 default: ;
9812 }
9813
9814 return N_UTYP;
9815 }
9816
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  Returns FAIL
   for an N_EQK mask (a constraint, not a type) or an empty mask.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
                     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever group the bit belongs to.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element type.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & N_F32) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
9855
9856 /* Modify a bitmask of allowed types. This is only needed for type
9857 relaxation. */
9858
9859 static unsigned
9860 modify_types_allowed (unsigned allowed, unsigned mods)
9861 {
9862 unsigned size;
9863 enum neon_el_type type;
9864 unsigned destmask;
9865 int i;
9866
9867 destmask = 0;
9868
9869 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9870 {
9871 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9872 {
9873 neon_modify_type_size (mods, &type, &size);
9874 destmask |= type_chk_of_el_type (type, size);
9875 }
9876 }
9877
9878 return destmask;
9879 }
9880
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.
*/

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted. */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  Also record which operand carries the
     N_KEY bit — that is the "key" operand.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* A type suffix on the mnemonic and per-operand types are exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: the first establishes the key type and its allowed set,
     the second checks every operand against it (after applying any
     N_EQK modifiers).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable. */
          if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly. */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

          if (pass == 0)
            {
	      /* First pass: only record the key operand's type.  */
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_EQK) == 0)
                {
		  /* Independent operand: check directly against the mask.  */
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
	              first_error (_("bad type in Neon instruction"));
	              return badtype;
                    }
                }
              else
                {
		  /* Key-relative operand: it must equal the (modified)
		     key type exactly.  */
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
10040
10041 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10042 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10043
10044 static unsigned
10045 neon_dp_fixup (unsigned i)
10046 {
10047 if (thumb_mode)
10048 {
10049 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10050 if (i & (1 << 24))
10051 i |= 1 << 28;
10052
10053 i &= ~(1 << 24);
10054
10055 i |= 0xef000000;
10056 }
10057 else
10058 i |= 0xf2000000;
10059
10060 return i;
10061 }
10062
/* Turn an element size in bits (8, 16, 32, 64) into the two-bit log2
   encoding used by Neon instructions (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so a size of
     8 (bit 3 set) yields 4; subtracting 4 gives the encoding.  */
  unsigned lowest_set = ffs (x);
  return lowest_set - 4;
}
10071
/* Low four bits of a Neon register number (the Vd/Vn/Vm field).  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a Neon register number (the D/N/M high bit).  */
#define HI1(R) (((R) >> 4) & 1)
10074
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction. */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd: low bits 12-15, high bit 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn: low bits 16-19, high bit 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm: low bits 0-3, high bit 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
10099
10100 /* Encode instructions of the form:
10101
10102 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10103 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10104
10105 Don't write size if SIZE == -1. */
10106
10107 static void
10108 neon_two_same (int qbit, int ubit, int size)
10109 {
10110 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10111 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10112 inst.instruction |= LOW4 (inst.operands[1].reg);
10113 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10114 inst.instruction |= (qbit != 0) << 6;
10115 inst.instruction |= (ubit != 0) << 24;
10116
10117 if (size != -1)
10118 inst.instruction |= neon_logbits (size) << 18;
10119
10120 inst.instruction = neon_dp_fixup (inst.instruction);
10121 }
10122
10123 /* Neon instruction encoders, in approximate order of appearance. */
10124
10125 static void
10126 do_neon_dyadic_i_su (void)
10127 {
10128 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10129 struct neon_type_el et = neon_check_type (3, rs,
10130 N_EQK, N_EQK, N_SU_32 | N_KEY);
10131 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10132 }
10133
10134 static void
10135 do_neon_dyadic_i64_su (void)
10136 {
10137 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10138 struct neon_type_el et = neon_check_type (3, rs,
10139 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10140 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10141 }
10142
/* Encode a Neon immediate-shift instruction.  ET is the (checked)
   element type; IMMBITS the pre-computed encoded shift amount.  The
   element size is folded into the immediate field as a marker bit.
   Only writes the U bit when WRITE_UBIT is set.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Bit 7 is set only for 64-bit elements (size == 8 bytes).  */
  inst.instruction |= (size >> 3) << 7;
  /* Size marker ORed into bits 19-21 of the immediate field.
     NOTE(review): assumes IMMBITS never sets those bits itself for
     sub-64-bit elements — confirm against the callers.  */
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
10161
10162 static void
10163 do_neon_shl_imm (void)
10164 {
10165 if (!inst.operands[2].isreg)
10166 {
10167 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10168 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10169 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10170 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10171 }
10172 else
10173 {
10174 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10175 struct neon_type_el et = neon_check_type (3, rs,
10176 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10177 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10178 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10179 }
10180 }
10181
10182 static void
10183 do_neon_qshl_imm (void)
10184 {
10185 if (!inst.operands[2].isreg)
10186 {
10187 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10188 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10189 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10190 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10191 inst.operands[2].imm);
10192 }
10193 else
10194 {
10195 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10196 struct neon_type_el et = neon_check_type (3, rs,
10197 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10198 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10199 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10200 }
10201 }
10202
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Select the cmode encoding for a logic-with-immediate instruction
     (used for VBIC/VORR-style immediates).  On success, store the 8-bit
     payload in *IMMBITS and return the cmode value; otherwise report an
     error and return FAIL.  */

  /* Handle .I8 and .I64 as pseudo-instructions.  */
  switch (size)
    {
    case 8:
      /* Unfortunately, this will make everything apart from zero out-of-range.
         FIXME is this the intended semantics? There doesn't seem much point in
         accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
      break;
    case 64:
      /* Similarly, anything other than zero will be replicated in bits [63:32],
         which probably isn't want we want if we specified .I64.  */
      if (immediate != 0)
        goto bad_immediate;
      size = 32;
      break;
    default: ;
    }

  /* The immediate must be a single byte in one of four byte positions;
     cmode encodes the position.  Size 16 only permits the low two
     positions (cmodes 0x9/0xb).  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return (size == 16) ? 0x9 : 0x1;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return (size == 16) ? 0xb : 0x3;
    }
  else if (immediate == (immediate & 0x00ff0000))
    {
      *immbits = immediate >> 16;
      return 0x5;
    }
  else if (immediate == (immediate & 0xff000000))
    {
      *immbits = immediate >> 24;
      return 0x7;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
10251
10252 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10253 A, B, C, D. */
10254
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Accept IMM only if each of its four bytes is either all-zeros or
     all-ones (see the comment above for the bit pattern).  */
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
        return 0;
    }

  return 1;
}
10263
10264 /* For immediate of above form, return 0bABCD. */
10265
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Collapse bit 0 of each byte of IMM into a 4-bit value: byte N's low
     bit becomes result bit N.  */
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
10272
10273 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10274
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Pack a quarter-precision float: sign (bit 31) moves to bit 7, and
     bits 25:19 (low exponent bit plus top mantissa bits) to bits 6:0.  */
  unsigned sign = (imm >> 24) & 0x80;
  unsigned rest = (imm >> 19) & 0x7f;

  return sign | rest;
}
10280
10281 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10282 the instruction. *OP is passed as the initial value of the op field, and
10283 may be set to a different value depending on the constant (i.e.
10284 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10285 MVN). */
10286
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
                         int *op, int size, enum neon_el_type type)
{
  /* Quarter-precision float immediates are only available for 32-bit
     float moves (not MVN).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }
  else if (size == 64 && neon_bits_same_in_bytes (immhi)
           && neon_bits_same_in_bytes (immlo))
    {
      /* Check this one first so we don't have to bother with immhi in later
         tests.  */
      if (*op == 1)
        return FAIL;
      *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
      /* Encoded as MOV even when the mnemonic was MVN.  */
      *op = 1;
      return 0xe;
    }
  else if (immhi != 0)
    return FAIL;
  else if (immlo == (immlo & 0x000000ff))
    {
      /* 64-bit case was already handled.  Don't allow MVN with 8-bit
         immediate.  */
      if ((size != 8 && size != 16 && size != 32)
          || (size == 8 && *op == 1))
        return FAIL;
      *immbits = immlo;
      return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
    }
  else if (immlo == (immlo & 0x0000ff00))
    {
      /* Byte in position 1; valid for size 16 or 32.  */
      if (size != 16 && size != 32)
        return FAIL;
      *immbits = immlo >> 8;
      return (size == 16) ? 0xa : 0x2;
    }
  else if (immlo == (immlo & 0x00ff0000))
    {
      /* Byte in position 2; 32-bit only.  */
      if (size != 32)
        return FAIL;
      *immbits = immlo >> 16;
      return 0x4;
    }
  else if (immlo == (immlo & 0xff000000))
    {
      /* Byte in position 3; 32-bit only.  */
      if (size != 32)
        return FAIL;
      *immbits = immlo >> 24;
      return 0x6;
    }
  else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
    {
      /* Byte in position 1 with ones below ("shifted-ones" form).  */
      if (size != 32)
        return FAIL;
      *immbits = (immlo >> 8) & 0xff;
      return 0xc;
    }
  else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
    {
      /* Byte in position 2 with ones below.  */
      if (size != 32)
        return FAIL;
      *immbits = (immlo >> 16) & 0xff;
      return 0xd;
    }

  return FAIL;
}
10359
10360 /* Write immediate bits [7:0] to the following locations:
10361
10362 |28/24|23 19|18 16|15 4|3 0|
10363 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10364
10365 This function is used by VMOV/VMVN/VORR/VBIC. */
10366
10367 static void
10368 neon_write_immbits (unsigned immbits)
10369 {
10370 inst.instruction |= immbits & 0xf;
10371 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10372 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10373 }
10374
10375 /* Invert low-order SIZE bits of XHI:XLO. */
10376
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Complement the low-order SIZE bits of the 64-bit value XHI:XLO,
     masking away everything above.  Either pointer may be NULL, in which
     case that half is treated as zero and not written back.  */
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32)
    lo = ~lo & 0xffffffff;
  else if (size == 64)
    {
      lo = ~lo & 0xffffffff;
      hi = ~hi & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
10411
static void
do_neon_logic (void)
{
  /* Bitwise logic instructions.  Either a plain three-register form, or a
     two-operand immediate form; VAND/VORN immediates are pseudo-instructions
     encoded as VBIC/VORR with the (size-masked) inverted immediate.  */
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (rs == NS_QQQ, 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DI_QI);
      struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
        | N_I64 | N_F32);
      /* Dispatch on the mnemonic; the condition bits are masked off.  */
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
                                            et.size);
          break;

        /* NOTE(review): identical to the vbic case above; the two case
           labels could share one body.  */
        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
                                            et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          immbits = inst.operands[1].imm;
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          immbits = inst.operands[1].imm;
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      /* Error already reported by neon_cmode_for_logic_imm.  */
      if (cmode == FAIL)
        return;

      inst.instruction |= (rs == NS_QI) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
10479
10480 static void
10481 do_neon_bitfield (void)
10482 {
10483 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10484 neon_check_type (3, rs, N_IGNORE_TYPE);
10485 neon_three_same (rs == NS_QQQ, 0, -1);
10486 }
10487
10488 static void
10489 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10490 unsigned destbits)
10491 {
10492 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10493 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10494 types | N_KEY);
10495 if (et.type == NT_float)
10496 {
10497 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10498 neon_three_same (rs == NS_QQQ, 0, -1);
10499 }
10500 else
10501 {
10502 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10503 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10504 }
10505 }
10506
10507 static void
10508 do_neon_dyadic_if_su (void)
10509 {
10510 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10511 }
10512
10513 static void
10514 do_neon_dyadic_if_su_d (void)
10515 {
10516 /* This version only allow D registers, but that constraint is enforced during
10517 operand parsing so we don't need to do anything extra here. */
10518 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10519 }
10520
10521 static void
10522 do_neon_dyadic_if_i (void)
10523 {
10524 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10525 }
10526
10527 static void
10528 do_neon_dyadic_if_i_d (void)
10529 {
10530 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10531 }
10532
10533 static void
10534 do_neon_addsub_if_i (void)
10535 {
10536 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10537 affected if we specify unsigned args. */
10538 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10539 }
10540
10541 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10542 result to be:
10543 V<op> A,B (A is operand 0, B is operand 2)
10544 to mean:
10545 V<op> A,B,A
10546 not:
10547 V<op> A,B,B
10548 so handle that case specially. */
10549
10550 static void
10551 neon_exchange_operands (void)
10552 {
10553 void *scratch = alloca (sizeof (inst.operands[0]));
10554 if (inst.operands[1].present)
10555 {
10556 /* Swap operands[1] and operands[2]. */
10557 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10558 inst.operands[1] = inst.operands[2];
10559 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10560 }
10561 else
10562 {
10563 inst.operands[1] = inst.operands[2];
10564 inst.operands[2] = inst.operands[0];
10565 }
10566 }
10567
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  /* Encode comparisons: either register-register (via the shared dyadic
     encoder), or compare-against-immediate-zero.  INVERT swaps the two
     source operands first, so an inverted condition can reuse the
     non-inverted opcode.  */
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQI) << 6;
      /* Bit 10 distinguishes the float compare from the integer one.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
10595
10596 static void
10597 do_neon_cmp (void)
10598 {
10599 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10600 }
10601
10602 static void
10603 do_neon_cmp_inv (void)
10604 {
10605 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10606 }
10607
10608 static void
10609 do_neon_ceq (void)
10610 {
10611 neon_compare (N_IF_32, N_IF_32, FALSE);
10612 }
10613
10614 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10615 scalars, which are encoded in 5 bits, M : Rm.
10616 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10617 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10618 index in M. */
10619
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].
     32-bit scalars: register in Rm[3:0], index in M (see the comment
     above).  Anything else is out of range.  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
10645
10646 /* Encode multiply / multiply-accumulate scalar instructions. */
10647
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  /* Encode a multiply / multiply-accumulate with a scalar operand:
     Rd <- operands[0], Rn <- operands[1], M:Rm <- the packed scalar.  */
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;   /* F bit.  */
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;            /* U (or Q) bit.  */

  inst.instruction = neon_dp_fixup (inst.instruction);
}
10670
10671 static void
10672 do_neon_mac_maybe_scalar (void)
10673 {
10674 if (inst.operands[2].isscalar)
10675 {
10676 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10677 struct neon_type_el et = neon_check_type (3, rs,
10678 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10679 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10680 neon_mul_mac (et, rs == NS_QQS);
10681 }
10682 else
10683 do_neon_dyadic_if_i ();
10684 }
10685
10686 static void
10687 do_neon_tst (void)
10688 {
10689 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10690 struct neon_type_el et = neon_check_type (3, rs,
10691 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10692 neon_three_same (rs == NS_QQQ, 0, et.size);
10693 }
10694
10695 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10696 same types as the MAC equivalents. The polynomial type for this instruction
10697 is encoded the same as the integer type. */
10698
10699 static void
10700 do_neon_mul (void)
10701 {
10702 if (inst.operands[2].isscalar)
10703 do_neon_mac_maybe_scalar ();
10704 else
10705 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10706 }
10707
static void
do_neon_qdmulh (void)
{
  /* Saturating doubling multiply (high half): scalar or three-register
     form, S16/S32 elements only.  */
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, rs == NS_QQS);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (rs == NS_QQQ, 0, et.size);
    }
}
10729
10730 static void
10731 do_neon_fcmp_absolute (void)
10732 {
10733 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10734 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10735 /* Size field comes from bit mask. */
10736 neon_three_same (rs == NS_QQQ, 1, -1);
10737 }
10738
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted condition: swap the sources, then encode as the ordinary
     absolute compare.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
10745
10746 static void
10747 do_neon_step (void)
10748 {
10749 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10750 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10751 neon_three_same (rs == NS_QQQ, 0, -1);
10752 }
10753
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  /* NOTE(review): neon_check_type is called with a count of 3 although
     NS_DD_QQ is a two-operand shape — confirm against neon_check_type's
     contract whether this should be (2, rs, N_EQK, ...).  */
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (rs == NS_QQ) << 6;
  /* Bit 10 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
10770
10771 static void
10772 do_neon_sli (void)
10773 {
10774 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10775 struct neon_type_el et = neon_check_type (2, rs,
10776 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10777 int imm = inst.operands[2].imm;
10778 constraint (imm < 0 || (unsigned)imm >= et.size,
10779 _("immediate out of range for insert"));
10780 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10781 }
10782
10783 static void
10784 do_neon_sri (void)
10785 {
10786 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10787 struct neon_type_el et = neon_check_type (2, rs,
10788 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10789 int imm = inst.operands[2].imm;
10790 constraint (imm < 1 || (unsigned)imm > et.size,
10791 _("immediate out of range for insert"));
10792 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10793 }
10794
10795 static void
10796 do_neon_qshlu_imm (void)
10797 {
10798 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10799 struct neon_type_el et = neon_check_type (2, rs,
10800 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10801 int imm = inst.operands[2].imm;
10802 constraint (imm < 0 || (unsigned)imm >= et.size,
10803 _("immediate out of range for shift"));
10804 /* Only encodes the 'U present' variant of the instruction.
10805 In this case, signed types have OP (bit 8) set to 0.
10806 Unsigned types have OP set to 1. */
10807 inst.instruction |= (et.type == NT_unsigned) << 8;
10808 /* The rest of the bits are the same as other immediate shifts. */
10809 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10810 }
10811
10812 static void
10813 do_neon_qmovn (void)
10814 {
10815 struct neon_type_el et = neon_check_type (2, NS_DQ,
10816 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10817 /* Saturating move where operands can be signed or unsigned, and the
10818 destination has the same signedness. */
10819 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10820 if (et.type == NT_unsigned)
10821 inst.instruction |= 0xc0;
10822 else
10823 inst.instruction |= 0x80;
10824 neon_two_same (0, 1, et.size / 2);
10825 }
10826
10827 static void
10828 do_neon_qmovun (void)
10829 {
10830 struct neon_type_el et = neon_check_type (2, NS_DQ,
10831 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10832 /* Saturating move with unsigned results. Operands must be signed. */
10833 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10834 neon_two_same (0, 1, et.size / 2);
10835 }
10836
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      /* Redirect to the saturating-move encoder; drop the now-meaningless
         immediate operand first.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* Shift is encoded as (halved) element size minus the count.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
10863
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      /* Redirect to the unsigned saturating-move encoder; drop the
         now-meaningless immediate operand first.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1. */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
10893
10894 static void
10895 do_neon_movn (void)
10896 {
10897 struct neon_type_el et = neon_check_type (2, NS_DQ,
10898 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10899 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10900 neon_two_same (0, 1, et.size / 2);
10901 }
10902
static void
do_neon_rshift_narrow (void)
{
  /* Rounding shift-right and narrow.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      /* Drop the unused immediate operand and redirect to the plain
         narrowing-move encoder.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for narrowing operation"));
  /* Shift is encoded as (halved) element size minus the count.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
10927
static void
do_neon_shll (void)
{
  /* Shift-left long.  The maximum-shift case (count equal to the element
     size) uses a distinct encoding from the general immediate-shift one.  */
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant. */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions. */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
10957
10958 /* Check the various types for the VCVT instruction, and return the one that
10959 the current instruction is. */
10960
static int
neon_cvt_flavour (enum neon_shape rs)
{
  /* Probe each legal type combination in turn and return its index:
     0 = s32->f32, 1 = u32->f32, 2 = f32->s32, 3 = f32->u32, or -1 when
     none matches.  inst.error is cleared on a match so that failures
     from the earlier probes are discarded.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, (X), (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  return -1;
#undef CVT_VAR
}
10981
static void
do_neon_cvt (void)
{
  /* Fixed-point conversion with #0 immediate is encoded as an integer
     conversion. */
  if (inst.operands[2].present && inst.operands[2].imm != 0)
    {
      /* Fixed-point form.  The fraction bits are encoded as 32 minus the
         requested value; ENCTAB is indexed by neon_cvt_flavour's result
         (s32->f32, u32->f32, f32->s32, f32->u32).  */
      enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
      int flavour = neon_cvt_flavour (rs);
      unsigned immbits = 32 - inst.operands[2].imm;
      unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      /* flavour == -1 means no valid type combination; the error has
         already been recorded, so only skip the flavour bits here.  */
      if (flavour != -1)
        inst.instruction |= enctab[flavour];
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQI) << 6;
      inst.instruction |= 1 << 21;
      inst.instruction |= immbits << 16;
    }
  else
    {
      /* Integer form (also reached for a #0 fixed-point immediate).  */
      enum neon_shape rs = neon_check_shape (NS_DD_QQ);
      int flavour = neon_cvt_flavour (rs);
      unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      if (flavour != -1)
        inst.instruction |= enctab[flavour];
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQ) << 6;
      inst.instruction |= 2 << 18;
    }
  inst.instruction = neon_dp_fixup (inst.instruction);
}
11021
static void
neon_move_immediate (void)
{
  /* Encode VMOV/VMVN with an immediate operand.  If the immediate cannot
     be encoded directly, retry with the bitwise-inverted immediate and
     the opposite mnemonic (MOV <-> MVN).  */
  enum neon_shape rs = neon_check_shape (NS_DI_QI);
  struct neon_type_el et = neon_check_type (1, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode;

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its upper half in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                            et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  /* Rewrite the op bit with the possibly-flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (rs == NS_QI) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
11068
11069 static void
11070 do_neon_mvn (void)
11071 {
11072 if (inst.operands[1].isreg)
11073 {
11074 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11075
11076 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11077 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11078 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11079 inst.instruction |= LOW4 (inst.operands[1].reg);
11080 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11081 inst.instruction |= (rs == NS_QQ) << 6;
11082 }
11083 else
11084 {
11085 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11086 neon_move_immediate ();
11087 }
11088
11089 inst.instruction = neon_dp_fixup (inst.instruction);
11090 }
11091
11092 /* Encode instructions of form:
11093
11094 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11095 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11096
11097 */
11098
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Shared encoder for long/wide/narrow ops (bit layout in the comment
     above): Rd <- operands[0], Rn <- operands[1], Rm <- operands[2].
     SIZE is passed separately because narrowing callers encode half the
     type's size.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;  /* U bit.  */
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11113
11114 static void
11115 do_neon_dyadic_long (void)
11116 {
11117 /* FIXME: Type checking for lengthening op. */
11118 struct neon_type_el et = neon_check_type (3, NS_QDD,
11119 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11120 neon_mixed_length (et, et.size);
11121 }
11122
11123 static void
11124 do_neon_abal (void)
11125 {
11126 struct neon_type_el et = neon_check_type (3, NS_QDD,
11127 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11128 neon_mixed_length (et, et.size);
11129 }
11130
11131 static void
11132 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11133 {
11134 if (inst.operands[2].isscalar)
11135 {
11136 struct neon_type_el et = neon_check_type (3, NS_QDS,
11137 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11138 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11139 neon_mul_mac (et, et.type == NT_unsigned);
11140 }
11141 else
11142 {
11143 struct neon_type_el et = neon_check_type (3, NS_QDD,
11144 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11145 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11146 neon_mixed_length (et, et.size);
11147 }
11148 }
11149
11150 static void
11151 do_neon_mac_maybe_scalar_long (void)
11152 {
11153 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11154 }
11155
11156 static void
11157 do_neon_dyadic_wide (void)
11158 {
11159 struct neon_type_el et = neon_check_type (3, NS_QQD,
11160 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11161 neon_mixed_length (et, et.size);
11162 }
11163
11164 static void
11165 do_neon_dyadic_narrow (void)
11166 {
11167 struct neon_type_el et = neon_check_type (3, NS_QDD,
11168 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11169 neon_mixed_length (et, et.size / 2);
11170 }
11171
11172 static void
11173 do_neon_mul_sat_scalar_long (void)
11174 {
11175 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11176 }
11177
static void
do_neon_vmull (void)
{
  /* VMULL: the by-scalar variant shares its handling with the other long
     multiply-accumulate scalar forms.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      /* The register form additionally accepts the polynomial type P8,
         which selects a distinct encoding.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
        inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
        inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
         zero. Should be OK as-is. */
      neon_mixed_length (et, et.size);
    }
}
11196
static void
do_neon_ext (void)
{
  /* VEXT: extract a vector from a pair of source vectors, starting at an
     element offset given by operand 3.  */
  enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* The instruction encodes a byte offset, so scale the element index by
     the element size in bytes.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* Bit 6 distinguishes the quadword form.  */
  inst.instruction |= (rs == NS_QQQI) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11215
static void
do_neon_rev (void)
{
  /* VREV16/VREV32/VREV64: reverse the order of elements within regions
     of a vector.  */
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (rs == NS_QQ, 1, et.size);
}
11232
static void
do_neon_dup (void)
{
  /* VDUP: duplicate either a scalar (Dm[x]) or an ARM core register into
     every lane of a Neon register.  */
  if (inst.operands[1].isscalar)
    {
      /* Scalar source form.  */
      enum neon_shape rs = neon_check_shape (NS_DS_QS);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The lane index sits immediately above the one-hot size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= (rs == NS_QS) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* ARM core register source form.  */
      enum neon_shape rs = neon_check_shape (NS_DR_QR);
      struct neon_type_el et = neon_check_type (1, rs,
        N_8 | N_16 | N_32 | N_KEY);
      unsigned save_cond = inst.instruction & 0xf0000000;
      /* Duplicate ARM register to lanes of vector. */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      /* Element size is encoded across two scattered bits.  */
      switch (et.size)
        {
        case 8: inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= (rs == NS_QR) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field. */
      if (thumb_mode)
        inst.instruction |= 0xe0000000;
      else
        inst.instruction |= save_cond;
    }
}
11283
11284 /* VMOV has particularly many variations. It can be one of:
11285 0. VMOV<c><q> <Qd>, <Qm>
11286 1. VMOV<c><q> <Dd>, <Dm>
11287 (Register operations, which are VORR with Rm = Rn.)
11288 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11289 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11290 (Immediate loads.)
11291 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11292 (ARM register to scalar.)
11293 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11294 (Two ARM registers to vector.)
11295 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11296 (Scalar to ARM register.)
11297 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11298 (Vector to two ARM registers.)
11299
11300 We should have just enough information to be able to disambiguate most of
11301 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11302 registers" cases. For these, abuse the .regisimm operand field to signify a
11303 Neon register.
11304
11305 All the encoded bits are hardcoded by this function.
11306
11307 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11308 can specify a type where it doesn't make sense to, and is ignored).
11309 */
11310
static void
do_neon_mov (void)
{
  /* Dispatch over the VMOV variants enumerated in the comment preceding
     this function, keyed first on the number of operands present and
     then on the operand kinds.  */
  int nargs = inst.operands[0].present + inst.operands[1].present
    + inst.operands[2].present;
  /* Thumb encodings carry a fixed 0xE condition; ARM keeps the parsed
     condition bits.  */
  unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;

  switch (nargs)
    {
    case 2:
      /* Cases 0, 1, 2, 3, 4, 6. */
      if (inst.operands[1].isscalar)
        {
          /* Case 6: scalar to ARM core register.  */
          struct neon_type_el et = neon_check_type (2, NS_IGNORE,
            N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
          unsigned logsize = neon_logbits (et.size);
          unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
          unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
          unsigned abcdebits = 0;

          constraint (et.type == NT_invtype, _("bad type for scalar"));
          constraint (x >= 64 / et.size, _("scalar index out of range"));

          /* The low bits of abcdebits select element size and signedness;
             the lane index is merged in above them below.  */
          switch (et.size)
            {
            case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
            case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
            case 32: abcdebits = 0x00; break;
            default: ;
            }

          abcdebits |= x << logsize;
          inst.instruction = save_cond;
          inst.instruction |= 0xe100b10;
          inst.instruction |= LOW4 (dn) << 16;
          inst.instruction |= HI1 (dn) << 7;
          inst.instruction |= inst.operands[0].reg << 12;
          /* abcdebits is split between bits [6:5] and bits [23:21].  */
          inst.instruction |= (abcdebits & 3) << 5;
          inst.instruction |= (abcdebits >> 2) << 21;
        }
      else if (inst.operands[1].isreg)
        {
          /* Cases 0, 1, 4. */
          if (inst.operands[0].isscalar)
            {
              /* Case 4: ARM core register to scalar.  */
              unsigned bcdebits = 0;
              struct neon_type_el et = neon_check_type (2, NS_IGNORE,
                N_8 | N_16 | N_32 | N_KEY, N_EQK);
              int logsize = neon_logbits (et.size);
              unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
              unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

              constraint (et.type == NT_invtype, _("bad type for scalar"));
              constraint (x >= 64 / et.size, _("scalar index out of range"));

              /* Size selector; lane index merged in above it below.  */
              switch (et.size)
                {
                case 8: bcdebits = 0x8; break;
                case 16: bcdebits = 0x1; break;
                case 32: bcdebits = 0x0; break;
                default: ;
                }

              bcdebits |= x << logsize;
              inst.instruction = save_cond;
              inst.instruction |= 0xe000b10;
              inst.instruction |= LOW4 (dn) << 16;
              inst.instruction |= HI1 (dn) << 7;
              inst.instruction |= inst.operands[1].reg << 12;
              inst.instruction |= (bcdebits & 3) << 5;
              inst.instruction |= (bcdebits >> 2) << 21;
            }
          else
            {
              /* Cases 0, 1: register-to-register move, encoded as VORR
                 with identical source operands.  */
              enum neon_shape rs = neon_check_shape (NS_DD_QQ);
              /* The architecture manual I have doesn't explicitly state which
                 value the U bit should have for register->register moves, but
                 the equivalent VORR instruction has U = 0, so do that. */
              inst.instruction = 0x0200110;
              inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
              inst.instruction |= HI1 (inst.operands[0].reg) << 22;
              inst.instruction |= LOW4 (inst.operands[1].reg);
              inst.instruction |= HI1 (inst.operands[1].reg) << 5;
              /* The source register appears in both the Rn and Rm fields.  */
              inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
              inst.instruction |= HI1 (inst.operands[1].reg) << 7;
              inst.instruction |= (rs == NS_QQ) << 6;

              inst.instruction = neon_dp_fixup (inst.instruction);
            }
        }
      else
        {
          /* Cases 2, 3: immediate loads.  */
          inst.instruction = 0x0800010;
          neon_move_immediate ();
          inst.instruction = neon_dp_fixup (inst.instruction);
        }
      break;

    case 3:
      /* Cases 5, 7. */
      if (inst.operands[0].regisimm)
        {
          /* Case 5: two ARM core registers to a D register.  */
          inst.instruction = save_cond;
          inst.instruction |= 0xc400b10;
          inst.instruction |= LOW4 (inst.operands[0].reg);
          inst.instruction |= HI1 (inst.operands[0].reg) << 5;
          inst.instruction |= inst.operands[1].reg << 12;
          inst.instruction |= inst.operands[2].reg << 16;
        }
      else
        {
          /* Case 7: a D register to two ARM core registers.  */
          inst.instruction = save_cond;
          inst.instruction |= 0xc500b10;
          inst.instruction |= inst.operands[0].reg << 12;
          inst.instruction |= inst.operands[1].reg << 16;
          inst.instruction |= LOW4 (inst.operands[2].reg);
          inst.instruction |= HI1 (inst.operands[2].reg) << 5;
        }
      break;

    default:
      /* Operand parsing should have rejected anything else.  */
      abort ();
    }
}
11441
static void
do_neon_rshift_round_imm (void)
{
  /* V{R}SHR: right shift by immediate.  A shift count of zero is
     re-assembled as a register VMOV instead.  */
  enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR. */
  if (imm == 0)
    {
      /* Drop the immediate operand so do_neon_mov sees a 2-operand
         register move.  */
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  /* The encoded shift amount counts down from the element size.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
                  et.size - imm);
}
11462
11463 static void
11464 do_neon_movl (void)
11465 {
11466 struct neon_type_el et = neon_check_type (2, NS_QD,
11467 N_EQK | N_DBL, N_SU_32 | N_KEY);
11468 unsigned sizebits = et.size >> 3;
11469 inst.instruction |= sizebits << 19;
11470 neon_two_same (0, et.type == NT_unsigned, -1);
11471 }
11472
11473 static void
11474 do_neon_trn (void)
11475 {
11476 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11477 struct neon_type_el et = neon_check_type (2, rs,
11478 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11479 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11480 neon_two_same (rs == NS_QQ, 1, et.size);
11481 }
11482
static void
do_neon_zip_uzp (void)
{
  /* VZIP / VUZP.  For the doubleword 32-bit case both operations reduce
     to the same data movement, which is encoded as VTRN.  */
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (rs == NS_QQ, 1, et.size);
}
11498
11499 static void
11500 do_neon_sat_abs_neg (void)
11501 {
11502 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11503 struct neon_type_el et = neon_check_type (2, rs,
11504 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11505 neon_two_same (rs == NS_QQ, 1, et.size);
11506 }
11507
11508 static void
11509 do_neon_pair_long (void)
11510 {
11511 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11512 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11513 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
11514 inst.instruction |= (et.type == NT_unsigned) << 7;
11515 neon_two_same (rs == NS_QQ, 1, et.size);
11516 }
11517
11518 static void
11519 do_neon_recip_est (void)
11520 {
11521 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11522 struct neon_type_el et = neon_check_type (2, rs,
11523 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11524 inst.instruction |= (et.type == NT_float) << 8;
11525 neon_two_same (rs == NS_QQ, 1, et.size);
11526 }
11527
11528 static void
11529 do_neon_cls (void)
11530 {
11531 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11532 struct neon_type_el et = neon_check_type (2, rs,
11533 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11534 neon_two_same (rs == NS_QQ, 1, et.size);
11535 }
11536
11537 static void
11538 do_neon_clz (void)
11539 {
11540 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11541 struct neon_type_el et = neon_check_type (2, rs,
11542 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11543 neon_two_same (rs == NS_QQ, 1, et.size);
11544 }
11545
11546 static void
11547 do_neon_cnt (void)
11548 {
11549 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11550 struct neon_type_el et = neon_check_type (2, rs,
11551 N_EQK | N_INT, N_8 | N_KEY);
11552 neon_two_same (rs == NS_QQ, 1, et.size);
11553 }
11554
11555 static void
11556 do_neon_swp (void)
11557 {
11558 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11559 neon_two_same (rs == NS_QQ, 1, -1);
11560 }
11561
static void
do_neon_tbl_tbx (void)
{
  /* VTBL / VTBX: table lookup, with a list of one to four D registers as
     the table operand.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded as (length - 1) in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11585
static void
do_neon_ldm_stm (void)
{
  /* VLDM / VSTM: load or store multiple 64-bit registers.  */
  /* P, U and L bits are part of bitmask. */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each 64-bit register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  /* The Thumb encoding carries 0xE in the condition field.  */
  if (thumb_mode)
    inst.instruction |= 0xe0000000;
}
11610
static void
do_neon_ldr_str (void)
{
  /* VLDR / VSTR: single register load/store with an immediate offset
     that must be a multiple of 4 and fit in 8 bits after scaling.  */
  unsigned offsetbits;
  int offset_up = 1;
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;

  constraint (inst.reloc.pc_rel && !is_ldr,
              _("PC-relative addressing unavailable with VSTR"));

  constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
              _("Immediate value must be a constant"));

  /* Negative offsets clear the U (up) bit and are encoded as a positive
     magnitude; the offset field counts words, hence the division by 4.  */
  if (inst.reloc.exp.X_add_number < 0)
    {
      offset_up = 0;
      offsetbits = -inst.reloc.exp.X_add_number / 4;
    }
  else
    offsetbits = inst.reloc.exp.X_add_number / 4;

  /* FIXME: Does this catch everything? */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted,
              BAD_ADDR_MODE);
  constraint ((inst.operands[1].imm & 3) != 0,
              _("Offset must be a multiple of 4"));
  constraint (offsetbits != (offsetbits & 0xff),
              _("Immediate offset out of range"));

  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= offsetbits & 0xff;
  inst.instruction |= offset_up << 23;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;

  /* PC-relative offsets are resolved later through a coprocessor-offset
     relocation.  */
  if (inst.reloc.pc_rel)
    {
      if (thumb_mode)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
  else
    inst.reloc.type = BFD_RELOC_UNUSED;
}
11662
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions. */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries. */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
    };
  int typebits;

  /* A bad element type has already been diagnosed.  */
  if (et.type == NT_invtype)
    return;

  /* Map the alignment from the address operand (in bits, stored above
     bit 8 of the immediate) to the two-bit align field.  128-bit and
     256-bit alignment are not permitted with three-register lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back. */
  idx = ((inst.operands[0].imm >> 4) & 7)
    | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
11728
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise. The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  Returns SUCCESS or FAIL; on FAIL an error
   has been recorded via first_error.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment was given: nothing to check, no bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Scan the (size, align) pairs until a match is found or the -1
     terminator is reached.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
11769
static void
do_neon_ld_st_lane (void)
{
  /* Single-lane VLD<n>/VST<n>: transfer one element to or from each of
     <n> registers.  */
  struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a D register for this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  /* Validate the (size, alignment) combination for each form and set the
     alignment bits accordingly.  */
  switch (n)
    {
    case 0: /* VLD1 / VST1. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1: /* VLD2 / VST2. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3. */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8: alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
11854
/* Encode single n-element structure to all lanes VLD<n> instructions. */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  /* A bad element type has already been diagnosed.  */
  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1. */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      /* A two-register list sets bit 5.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2. */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3. */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4. */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements with 128-bit alignment use the size value 0b11
           in place of the usual log2-based encoding.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
11928
/* Disambiguate VLD<n> and VST<n> instructions, and fill in the common bits
   (those apart from bits [11:4]). */
11931
static void
do_neon_ldx_stx (void)
{
  /* Choose between the interleave, all-lanes and single-lane forms based
     on the lane specifier, then fill in the fields common to all three.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask. */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Rm field: the post-index register, 0xd for writeback with no
     register, or 0xf for no writeback.  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
11978
11979 \f
11980 /* Overall per-instruction processing. */
11981
11982 /* We need to be able to fix up arbitrary expressions in some statements.
11983 This is so that we can handle symbols that are an arbitrary distance from
11984 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
11985 which returns part of an address in a form which will be valid for
11986 a data instruction. We do this by pushing the expression into a symbol
11987 in the expr_section, and creating a fix for that. */
11988
11989 static void
11990 fix_new_arm (fragS * frag,
11991 int where,
11992 short int size,
11993 expressionS * exp,
11994 int pc_rel,
11995 int reloc)
11996 {
11997 fixS * new_fix;
11998
11999 switch (exp->X_op)
12000 {
12001 case O_constant:
12002 case O_symbol:
12003 case O_add:
12004 case O_subtract:
12005 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12006 break;
12007
12008 default:
12009 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12010 pc_rel, reloc);
12011 break;
12012 }
12013
12014 /* Mark whether the fix is to a THUMB instruction, or an ARM
12015 instruction. */
12016 new_fix->tc_fix_data = thumb_mode;
12017 }
12018
/* Create a frag for an instruction requiring relaxation. */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

#ifdef OBJ_ELF
  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction. */
  dwarf2_emit_insn (0);
#endif

  /* Reduce the relocation expression to a symbol/offset pair for the
     variant frag.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      /* Complex expressions go through an expression-section symbol.  */
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
12052
12053 /* Write a 32-bit thumb instruction to buf. */
12054 static void
12055 put_thumb32_insn (char * buf, unsigned long insn)
12056 {
12057 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12058 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12059 }
12060
12061 static void
12062 output_inst (const char * str)
12063 {
12064 char * to = NULL;
12065
12066 if (inst.error)
12067 {
12068 as_bad ("%s -- `%s'", inst.error, str);
12069 return;
12070 }
12071 if (inst.relax) {
12072 output_relax_insn();
12073 return;
12074 }
12075 if (inst.size == 0)
12076 return;
12077
12078 to = frag_more (inst.size);
12079
12080 if (thumb_mode && (inst.size > THUMB_SIZE))
12081 {
12082 assert (inst.size == (2 * THUMB_SIZE));
12083 put_thumb32_insn (to, inst.instruction);
12084 }
12085 else if (inst.size > INSN_SIZE)
12086 {
12087 assert (inst.size == (2 * INSN_SIZE));
12088 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12089 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12090 }
12091 else
12092 md_number_to_chars (to, inst.instruction, inst.size);
12093
12094 if (inst.reloc.type != BFD_RELOC_UNUSED)
12095 fix_new_arm (frag_now, to - frag_now->fr_literal,
12096 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12097 inst.reloc.type);
12098
12099 #ifdef OBJ_ELF
12100 dwarf2_emit_insn (inst.size);
12101 #endif
12102 }
12103
/* Tag values used in struct asm_opcode's tag field.  They describe where
   (if anywhere) a conditional affix may appear in a mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
12132
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode only), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || (unified_syntax && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* ".w" forces a 32-bit encoding, ".n" a 16-bit encoding.  Any
	 other character after the dot is left for the type parser.  */
      if (end[1] == 'w')
	inst.size_req = 4;
      else if (end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix.  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return 0;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Anything other than an odd-position infix is unconditional
	     when matched whole.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes the character index of the infix.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters: the suffix itself is two characters, leaving at least
     one for the root mnemonic.  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_odd_infix_unc:
	  /* Infix-only mnemonics accept a suffix spelling only in
	     unified syntax.  */
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    {
	      inst.cond = cond->value;
	    }
	  else
	    {
	      /* Delayed diagnostic: record the error now, report it
		 later, so operand parsing can still proceed.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return 0;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  /* Temporarily delete the two infix characters from the input line,
     look up what remains, then restore the line to its original form
     (callers and later diagnostics rely on the untouched text).  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode && (opcode->tag == OT_cinfix3 || opcode->tag == OT_csuf_or_in3
		 || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax && opcode->tag == OT_cinfix3)
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
12330
12331 void
12332 md_assemble (char *str)
12333 {
12334 char *p = str;
12335 const struct asm_opcode * opcode;
12336
12337 /* Align the previous label if needed. */
12338 if (last_label_seen != NULL)
12339 {
12340 symbol_set_frag (last_label_seen, frag_now);
12341 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12342 S_SET_SEGMENT (last_label_seen, now_seg);
12343 }
12344
12345 memset (&inst, '\0', sizeof (inst));
12346 inst.reloc.type = BFD_RELOC_UNUSED;
12347
12348 opcode = opcode_lookup (&p);
12349 if (!opcode)
12350 {
12351 /* It wasn't an instruction, but it might be a register alias of
12352 the form alias .req reg, or a Neon .dn/.qn directive. */
12353 if (!create_register_alias (str, p)
12354 && !create_neon_reg_alias (str, p))
12355 as_bad (_("bad instruction `%s'"), str);
12356
12357 return;
12358 }
12359
12360 if (thumb_mode)
12361 {
12362 arm_feature_set variant;
12363
12364 variant = cpu_variant;
12365 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12366 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12367 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12368 /* Check that this instruction is supported for this CPU. */
12369 if (!opcode->tvariant
12370 || (thumb_mode == 1
12371 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12372 {
12373 as_bad (_("selected processor does not support `%s'"), str);
12374 return;
12375 }
12376 if (inst.cond != COND_ALWAYS && !unified_syntax
12377 && opcode->tencode != do_t_branch)
12378 {
12379 as_bad (_("Thumb does not support conditional execution"));
12380 return;
12381 }
12382
12383 /* Check conditional suffixes. */
12384 if (current_it_mask)
12385 {
12386 int cond;
12387 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12388 current_it_mask <<= 1;
12389 current_it_mask &= 0x1f;
12390 /* The BKPT instruction is unconditional even in an IT block. */
12391 if (!inst.error
12392 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12393 {
12394 as_bad (_("incorrect condition in IT block"));
12395 return;
12396 }
12397 }
12398 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12399 {
12400 as_bad (_("thumb conditional instrunction not in IT block"));
12401 return;
12402 }
12403
12404 mapping_state (MAP_THUMB);
12405 inst.instruction = opcode->tvalue;
12406
12407 if (!parse_operands (p, opcode->operands))
12408 opcode->tencode ();
12409
12410 /* Clear current_it_mask at the end of an IT block. */
12411 if (current_it_mask == 0x10)
12412 current_it_mask = 0;
12413
12414 if (!(inst.error || inst.relax))
12415 {
12416 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12417 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12418 if (inst.size_req && inst.size_req != inst.size)
12419 {
12420 as_bad (_("cannot honor width suffix -- `%s'"), str);
12421 return;
12422 }
12423 }
12424 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12425 *opcode->tvariant);
12426 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12427 set those bits when Thumb-2 32-bit instructions are seen. ie.
12428 anything other than bl/blx.
12429 This is overly pessimistic for relaxable instructions. */
12430 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12431 || inst.relax)
12432 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12433 arm_ext_v6t2);
12434 }
12435 else
12436 {
12437 /* Check that this instruction is supported for this CPU. */
12438 if (!opcode->avariant ||
12439 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12440 {
12441 as_bad (_("selected processor does not support `%s'"), str);
12442 return;
12443 }
12444 if (inst.size_req)
12445 {
12446 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12447 return;
12448 }
12449
12450 mapping_state (MAP_ARM);
12451 inst.instruction = opcode->avalue;
12452 if (opcode->tag == OT_unconditionalF)
12453 inst.instruction |= 0xF << 28;
12454 else
12455 inst.instruction |= inst.cond << 28;
12456 inst.size = INSN_SIZE;
12457 if (!parse_operands (p, opcode->operands))
12458 opcode->aencode ();
12459 /* Arm mode bx is marked as both v4T and v5 because it's still required
12460 on a hypothetical non-thumb v5 core. */
12461 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12462 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12463 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12464 else
12465 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12466 *opcode->avariant);
12467 }
12468 output_inst (str);
12469 }
12470
12471 /* Various frobbings of labels and their addresses. */
12472
/* Hook run by gas at the start of every logical input line.  Forget
   the label remembered from the previous line; md_assemble only
   re-anchors a label that was seen since the last call here.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
12478
/* Hook run by gas whenever a label SYM is defined.  Remembers the
   label for md_assemble's re-anchoring, tags it with the current
   ARM/Thumb and interworking state, and applies the Thumb function
   marking logic described in the long comment below.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labeled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The .thumb_func marking applies to one label only.  */
      label_is_thumb_function_name = FALSE;
    }

#ifdef OBJ_ELF
  dwarf2_emit_label (sym);
#endif
}
12537
12538 int
12539 arm_data_in_code (void)
12540 {
12541 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12542 {
12543 *input_line_pointer = '/';
12544 input_line_pointer += 5;
12545 *input_line_pointer = 0;
12546 return 1;
12547 }
12548
12549 return 0;
12550 }
12551
12552 char *
12553 arm_canonicalize_symbol_name (char * name)
12554 {
12555 int len;
12556
12557 if (thumb_mode && (len = strlen (name)) > 5
12558 && streq (name + len - 5, "/data"))
12559 *(name + len - 5) = 0;
12560
12561 return name;
12562 }
12563 \f
12564 /* Table of all register names defined by default. The user can
12565 define additional names with .req. Note that all register names
12566 should appear in both upper and lowercase variants. Some registers
12567 also have mixed-case names. */
12568
12569 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12570 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12571 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12572 #define REGSET(p,t) \
12573 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12574 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12575 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12576 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12577 #define REGSETH(p,t) \
12578 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12579 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12580 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12581 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12582 #define REGSET2(p,t) \
12583 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12584 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12585 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12586 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12587
12588 static const struct reg_entry reg_names[] =
12589 {
12590 /* ARM integer registers. */
12591 REGSET(r, RN), REGSET(R, RN),
12592
12593 /* ATPCS synonyms. */
12594 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12595 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12596 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12597
12598 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12599 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12600 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12601
12602 /* Well-known aliases. */
12603 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12604 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12605
12606 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12607 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12608
12609 /* Coprocessor numbers. */
12610 REGSET(p, CP), REGSET(P, CP),
12611
12612 /* Coprocessor register numbers. The "cr" variants are for backward
12613 compatibility. */
12614 REGSET(c, CN), REGSET(C, CN),
12615 REGSET(cr, CN), REGSET(CR, CN),
12616
12617 /* FPA registers. */
12618 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12619 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12620
12621 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12622 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12623
12624 /* VFP SP registers. */
12625 REGSET(s,VFS), REGSET(S,VFS),
12626 REGSETH(s,VFS), REGSETH(S,VFS),
12627
12628 /* VFP DP Registers. */
12629 REGSET(d,VFD), REGSET(D,VFD),
12630 /* Extra Neon DP registers. */
12631 REGSETH(d,VFD), REGSETH(D,VFD),
12632
12633 /* Neon QP registers. */
12634 REGSET2(q,NQ), REGSET2(Q,NQ),
12635
12636 /* VFP control registers. */
12637 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12638 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12639
12640 /* Maverick DSP coprocessor registers. */
12641 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12642 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12643
12644 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12645 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12646 REGDEF(dspsc,0,DSPSC),
12647
12648 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12649 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12650 REGDEF(DSPSC,0,DSPSC),
12651
12652 /* iWMMXt data registers - p0, c0-15. */
12653 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12654
12655 /* iWMMXt control registers - p1, c0-3. */
12656 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12657 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12658 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12659 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12660
12661 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12662 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12663 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12664 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12665 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12666
12667 /* XScale accumulator registers. */
12668 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12669 };
12670 #undef REGDEF
12671 #undef REGNUM
12672 #undef REGSET
12673
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the f/s/x/c flag
   letters is listed explicitly so that lookup is a single hash
   probe with no parsing of the suffix.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
12751
/* Table of V7M psr names.  The integer values are presumably the
   ARMv7-M MRS/MSR special-register encodings -- NOTE(review):
   confirm against the v7-M architecture manual.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 },
  {"iapsr",	  1 },
  {"eapsr",	  2 },
  {"psr",	  3 },
  {"ipsr",	  5 },
  {"epsr",	  6 },
  {"iepsr",	  7 },
  {"msp",	  8 },
  {"psp",	  9 },
  {"primask",	  16},
  {"basepri",	  17},
  {"basepri_max", 18},
  {"faultmask",	  19},
  {"control",	  20}
};
12770
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl"; both map to the same SHIFT_LSL code.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
12781
/* Table of all explicit relocation names, each in both lower- and
   upper-case spellings.  ELF-only: these are the "(reloc)" operand
   prefixes mapped to their BFD relocation codes.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
12799
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  "hs"/"cs" and "cc"/"ul"/"lo" are synonymous spellings of the
   same condition values.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
12819
/* Names for the barrier option operand, mapped to their encodings --
   NOTE(review): presumably the DMB/DSB option field values; confirm
   against the architecture manual.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
12827
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  M2 is the condition infix; when it is
   empty, sizeof(#m2) == 1 (just the NUL) and the entry becomes the
   unconditional OT_odd_infix_unc variant, otherwise the tag encodes
   the infix position as OT_odd_infix_0 + strlen(#m1).  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only counterpart of TxCM_ above; same empty-infix trick.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT,	\
    THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Placeholder encoder for entries with no ARM-mode encoding.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE(mnem,  0, op, nops, ops, 0, te)
13001
13002 static const struct asm_opcode insns[] =
13003 {
13004 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13005 #define THUMB_VARIANT &arm_ext_v4t
13006 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13007 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13008 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13009 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13010 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13011 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13012 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13013 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13014 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13015 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13016 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13017 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13018 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13019 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13020 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13021 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13022
13023 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13024 for setting PSR flag bits. They are obsolete in V6 and do not
13025 have Thumb equivalents. */
13026 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13027 tC3(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13028 CL(tstp, 110f000, 2, (RR, SH), cmp),
13029 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13030 tC3(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13031 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13032 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13033 tC3(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13034 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13035
13036 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13037 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13038 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13039 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13040
13041 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13042 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13043 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13044 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13045
13046 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13047 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13048 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13049 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13050 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13051 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13052
13053 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13054 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13055 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13056 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13057
13058 /* Pseudo ops. */
13059 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13060 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13061 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13062
13063 /* Thumb-compatibility pseudo ops. */
13064 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13065 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13066 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13067 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13068 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13069 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13070 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13071 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13072 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13073 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13074 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13075 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13076
13077 #undef THUMB_VARIANT
13078 #define THUMB_VARIANT &arm_ext_v6
13079 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13080
13081 /* V1 instructions with no Thumb analogue prior to V6T2. */
13082 #undef THUMB_VARIANT
13083 #define THUMB_VARIANT &arm_ext_v6t2
13084 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13085 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13086 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13087 TC3(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13088 CL(teqp, 130f000, 2, (RR, SH), cmp),
13089
13090 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13091 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13092 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13093 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13094
13095 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13096 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13097
13098 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13099 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13100
13101 /* V1 instructions with no Thumb analogue at all. */
13102 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13103 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13104
13105 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13106 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13107 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13108 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13109 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13110 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13111 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13112 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13113
13114 #undef ARM_VARIANT
13115 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13116 #undef THUMB_VARIANT
13117 #define THUMB_VARIANT &arm_ext_v4t
13118 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13119 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13120
13121 #undef THUMB_VARIANT
13122 #define THUMB_VARIANT &arm_ext_v6t2
13123 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13124 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13125
13126 /* Generic coprocessor instructions. */
13127 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13128 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13129 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13130 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13131 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13132 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13133 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13134
13135 #undef ARM_VARIANT
13136 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13137 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13138 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13139
13140 #undef ARM_VARIANT
13141 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13142 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13143 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13144
13145 #undef ARM_VARIANT
13146 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13147 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13148 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13149 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13150 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13151 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13152 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13153 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13154 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13155
13156 #undef ARM_VARIANT
13157 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13158 #undef THUMB_VARIANT
13159 #define THUMB_VARIANT &arm_ext_v4t
13160 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13161 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13162 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13163 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13164 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13165 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13166
13167 #undef ARM_VARIANT
13168 #define ARM_VARIANT &arm_ext_v4t_5
13169 /* ARM Architecture 4T. */
13170 /* Note: bx (and blx) are required on V5, even if the processor does
13171 not support Thumb. */
13172 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13173
13174 #undef ARM_VARIANT
13175 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13176 #undef THUMB_VARIANT
13177 #define THUMB_VARIANT &arm_ext_v5t
13178 /* Note: blx has 2 variants; the .value coded here is for
13179 BLX(2). Only this variant has conditional execution. */
13180 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13181 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13182
13183 #undef THUMB_VARIANT
13184 #define THUMB_VARIANT &arm_ext_v6t2
13185 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13186 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13187 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13188 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13189 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13190 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13191 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13192 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13193
13194 #undef ARM_VARIANT
13195 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13196 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13197 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13198 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13199 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13200
13201 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13202 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13203
13204 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13205 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13206 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13207 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13208
13209 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13210 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13211 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13212 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13213
13214 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13215 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13216
13217 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13218 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13219 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13220 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13221
13222 #undef ARM_VARIANT
13223 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13224 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13225 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13226 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13227
13228 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13229 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13230
13231 #undef ARM_VARIANT
13232 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13233 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13234
13235 #undef ARM_VARIANT
13236 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13237 #undef THUMB_VARIANT
13238 #define THUMB_VARIANT &arm_ext_v6
13239 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13240 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13241 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13242 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13243 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13244 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13245 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13246 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13247 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13248 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13249
13250 #undef THUMB_VARIANT
13251 #define THUMB_VARIANT &arm_ext_v6t2
13252 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13253 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13254 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13255
13256 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13257 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13258
13259 /* ARM V6 not included in V7M (eg. integer SIMD). */
13260 #undef THUMB_VARIANT
13261 #define THUMB_VARIANT &arm_ext_v6_notm
13262 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13263 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13264 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13265 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13266 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13267 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13268 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13269 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13270 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13271 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13272 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13273 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13274 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13275 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13276 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13277 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13278 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13279 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13280 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13281 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13282 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13283 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13284 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13285 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13286 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13287 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13288 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13289 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13290 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13291 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13292 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13293 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13294 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13295 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13296 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13297 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13298 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13299 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13300 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13301 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13302 UF(rfeib, 9900a00, 1, (RRw), rfe),
13303 UF(rfeda, 8100a00, 1, (RRw), rfe),
13304 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13305 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13306 UF(rfefa, 9900a00, 1, (RRw), rfe),
13307 UF(rfeea, 8100a00, 1, (RRw), rfe),
13308 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13309 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13310 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13311 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13312 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13313 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13314 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13315 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13316 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13317 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13318 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13319 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13320 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13321 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13322 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13323 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13324 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13325 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13326 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13327 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13328 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13329 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13330 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13331 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13332 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13333 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13334 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13335 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13336 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13337 UF(srsib, 9cd0500, 1, (I31w), srs),
13338 UF(srsda, 84d0500, 1, (I31w), srs),
13339 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13340 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13341 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13342 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13343 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13344 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13345 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13346
13347 #undef ARM_VARIANT
13348 #define ARM_VARIANT &arm_ext_v6k
13349 #undef THUMB_VARIANT
13350 #define THUMB_VARIANT &arm_ext_v6k
13351 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13352 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13353 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13354 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13355
13356 #undef THUMB_VARIANT
13357 #define THUMB_VARIANT &arm_ext_v6_notm
13358 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13359 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13360
13361 #undef THUMB_VARIANT
13362 #define THUMB_VARIANT &arm_ext_v6t2
13363 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13364 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13365 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13366 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13367 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13368
13369 #undef ARM_VARIANT
13370 #define ARM_VARIANT &arm_ext_v6z
13371 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13372
13373 #undef ARM_VARIANT
13374 #define ARM_VARIANT &arm_ext_v6t2
13375 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13376 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13377 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13378 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13379
13380 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13381 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
13382 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
13383 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13384
13385 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13386 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13387 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13388 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13389
13390 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13391 UT(cbz, b100, 2, (RR, EXP), t_czb),
13392 /* ARM does not really have an IT instruction. */
13393 TUE(it, 0, bf08, 1, (COND), it, t_it),
13394 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13395 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13396 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13397 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13398 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13399 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13400 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13401 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13402 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13403 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13404 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13405 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13406 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13407 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
13408
13409 /* Thumb2 only instructions. */
13410 #undef ARM_VARIANT
13411 #define ARM_VARIANT NULL
13412
13413 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13414 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13415 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13416 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13417
13418 /* Thumb-2 hardware division instructions (R and M profiles only). */
13419 #undef THUMB_VARIANT
13420 #define THUMB_VARIANT &arm_ext_div
13421 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13422 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13423
13424 /* ARM V7 instructions. */
13425 #undef ARM_VARIANT
13426 #define ARM_VARIANT &arm_ext_v7
13427 #undef THUMB_VARIANT
13428 #define THUMB_VARIANT &arm_ext_v7
13429 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13430 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13431 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13432 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13433 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
13434
13435 #undef ARM_VARIANT
13436 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13437 cCE(wfs, e200110, 1, (RR), rd),
13438 cCE(rfs, e300110, 1, (RR), rd),
13439 cCE(wfc, e400110, 1, (RR), rd),
13440 cCE(rfc, e500110, 1, (RR), rd),
13441
13442 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13443 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13444 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13445 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13446
13447 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13448 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13449 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13450 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13451
13452 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13453 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13454 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13455 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13456 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13457 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13458 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13459 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13460 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13461 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13462 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13463 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13464
13465 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13466 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13467 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13468 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13469 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13470 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13471 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13472 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13473 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13474 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13475 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13476 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13477
13478 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13479 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13480 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13481 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13482 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13483 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13484 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13485 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13486 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13487 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13488 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13489 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13490
13491 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13492 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13493 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13494 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13495 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13496 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13497 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13498 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13499 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13500 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13501 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13502 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13503
13504 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13505 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13506 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13507 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13508 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13509 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13510 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13511 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13512 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13513 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13514 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13515 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13516
13517 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13518 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13519 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13520 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13521 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13522 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13523 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13524 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13525 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13526 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13527 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13528 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13529
13530 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13531 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13532 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13533 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13534 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13535 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13536 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13537 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13538 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13539 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13540 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13541 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13542
13543 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13544 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13545 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13546 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13547 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13548 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13549 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13550 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13551 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13552 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13553 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13554 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
13555
13556 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13557 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13558 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13559 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13560 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13561 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13562 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13563 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13564 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13565 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13566 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13567 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13568
13569 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13570 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13571 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13572 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13573 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13574 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13575 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13576 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13577 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13578 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13579 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13580 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13581
13582 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13583 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13584 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13585 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13586 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13587 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13588 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13589 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13590 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13591 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13592 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13593 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13594
13595 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13596 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13597 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13598 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13599 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13600 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13601 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13602 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13603 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13604 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13605 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13606 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13607
13608 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13609 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13610 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13611 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13612 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13613 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13614 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13615 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13616 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13617 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13618 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13619 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13620
13621 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13622 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13623 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13624 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13625 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13626 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13627 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13628 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13629 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13630 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13631 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13632 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13633
13634 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13635 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13636 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13637 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13638 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13639 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13640 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13641 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13642 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13643 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13644 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13645 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13646
13647 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13648 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13649 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13650 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13651 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13652 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13653 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13654 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13655 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13656 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13657 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13658 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13659
13660 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13661 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13662 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13663 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13664 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13665 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13666 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13667 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13668 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13669 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13670 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13671 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13672
13673 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13674 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13675 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13676 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13677 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13678 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13679 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13680 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13681 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13682 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13683 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13684 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13685
13686 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13687 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13688 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13689 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13690 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13691 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13692 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13693 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13694 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13695 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13696 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13697 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13698
13699 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13700 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13701 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13702 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13703 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13704 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13705 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13706 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13707 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13708 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13709 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13710 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13711
13712 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13713 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13714 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13715 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13716 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13717 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13718 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13719 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13720 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13721 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13722 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13723 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13724
13725 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13726 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13727 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13728 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13729 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13730 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13731 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13732 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13733 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13734 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13735 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13736 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13737
13738 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13739 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13740 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13741 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13742 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13743 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13744 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13745 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13746 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13747 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13748 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13749 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13750
13751 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13752 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13753 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13754 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13755 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13756 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13757 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13758 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13759 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13760 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13761 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13762 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13763
13764 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13765 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13766 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13767 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13768 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13769 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13770 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13771 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13772 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13773 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13774 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13775 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13776
13777 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13778 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13779 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13780 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13781 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13782 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13784 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13788 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13789
13790 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13794 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13797 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13801 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13802
13803 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13807 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13810 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13814 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13815
13816 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13820 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13823 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13827 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13828
13829 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13830 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13831 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13832 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13833
13834 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13835 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13836 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13837 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13838 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13839 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13840 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13841 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13842 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13843 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13844 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13845 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13846
13847 /* The implementation of the FIX instruction is broken on some
13848 assemblers, in that it accepts a precision specifier as well as a
13849 rounding specifier, despite the fact that this is meaningless.
13850 To be more compatible, we accept it as well, though of course it
13851 does not set any bits. */
13852 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13853 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13854 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13855 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13856 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13857 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13858 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13859 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13860 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13861 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13862 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13863 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13864 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13865
13866 /* Instructions that were new with the real FPA, call them V2. */
13867 #undef ARM_VARIANT
13868 #define ARM_VARIANT &fpu_fpa_ext_v2
13869 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13870 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13871 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13872 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13873 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13874 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13875
13876 #undef ARM_VARIANT
13877 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13878 /* Moves and type conversions. */
13879 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13880 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13881 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13882 cCE(fmstat, ef1fa10, 0, (), noargs),
13883 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
13884 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
13885 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
13886 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13887 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
13888 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13889 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
13890 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
13891
13892 /* Memory operations. */
13893 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
13894 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
13895 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13896 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13897 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13898 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13899 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13900 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13901 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13902 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13903 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13904 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13905 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13906 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13907 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13908 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13909 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13910 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13911
13912 /* Monadic operations. */
13913 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
13914 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
13915 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
13916
13917 /* Dyadic operations. */
13918 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13919 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13920 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13921 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13922 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13923 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13924 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13925 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13926 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13927
13928 /* Comparisons. */
13929 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
13930 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
13931 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
13932 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
13933
13934 #undef ARM_VARIANT
13935 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13936 /* Moves and type conversions. */
13937 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13938 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13939 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13940 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
13941 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
13942 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
13943 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
13944 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13945 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
13946 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13947 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13948 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13949 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13950
13951 /* Memory operations. */
13952 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
13953 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
13954 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13955 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13956 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13957 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13958 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13959 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13960 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13961 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13962
13963 /* Monadic operations. */
13964 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13965 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13966 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13967
13968 /* Dyadic operations. */
13969 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13970 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13971 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13972 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13973 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13974 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13975 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13976 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13977 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13978
13979 /* Comparisons. */
13980 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13981 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
13982 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13983 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
13984
13985 #undef ARM_VARIANT
13986 #define ARM_VARIANT &fpu_vfp_ext_v2
13987 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
13988 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
13989 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
13990 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
13991
13992 #undef THUMB_VARIANT
13993 #define THUMB_VARIANT &fpu_neon_ext_v1
13994 #undef ARM_VARIANT
13995 #define ARM_VARIANT &fpu_neon_ext_v1
13996 /* Data processing with three registers of the same length. */
13997 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
13998 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
13999 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14000 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14001 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14002 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14003 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14004 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14005 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14006 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14007 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14008 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14009 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14010 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14011 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14012 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14013 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14014 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14015 /* If not immediate, fall back to neon_dyadic_i64_su.
14016 shl_imm should accept I8 I16 I32 I64,
14017 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14018 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14019 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14020 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14021 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14022 /* Logic ops, types optional & ignored. */
14023 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14024 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14025 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14026 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14027 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14028 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14029 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14030 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14031 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14032 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14033 /* Bitfield ops, untyped. */
14034 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14035 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14036 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14037 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14038 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14039 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14040 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14041 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14042 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14043 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14044 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14045 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14046 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14047 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14048 back to neon_dyadic_if_su. */
14049 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14050 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14051 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14052 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14053 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14054 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14055 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14056 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14057 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14058 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14059 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14060 /* As above, D registers only. */
14061 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14062 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14063 /* Int and float variants, signedness unimportant. */
14064 /* If not scalar, fall back to neon_dyadic_if_i. */
14065 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14066 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14067 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14068 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14069 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14070 /* Add/sub take types I8 I16 I32 I64 F32. */
14071 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14072 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14073 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14074 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14075 /* vtst takes sizes 8, 16, 32. */
14076 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14077 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14078 /* VMUL takes I8 I16 I32 F32 P8. */
14079 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14080 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14081 /* VQD{R}MULH takes S16 S32. */
14082 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14083 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14084 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14085 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14086 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14087 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14088 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14089 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14090 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14091 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14092 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14093 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14094 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14095 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14096 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14097 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14098
14099 /* Two address, int/float. Types S8 S16 S32 F32. */
14100 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14101 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14102 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14103 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14104
14105 /* Data processing with two registers and a shift amount. */
14106 /* Right shifts, and variants with rounding.
14107 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14108 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14109 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14110 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14111 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14112 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14113 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14114 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14115 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14116 /* Shift and insert. Sizes accepted 8 16 32 64. */
14117 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14118 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14119 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14120 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14121 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14122 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14123 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14124 /* Right shift immediate, saturating & narrowing, with rounding variants.
14125 Types accepted S16 S32 S64 U16 U32 U64. */
14126 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14127 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14128 /* As above, unsigned. Types accepted S16 S32 S64. */
14129 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14130 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14131 /* Right shift narrowing. Types accepted I16 I32 I64. */
14132 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14133 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14134 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14135 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14136 /* CVT with optional immediate for fixed-point variant. */
14137 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14138 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14139
14140 /* One register and an immediate value. All encoding special-cased! */
14141 NCE(vmov, 0, 1, (VMOV), neon_mov),
14142 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14143 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14144 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14145
14146 /* Data processing, three registers of different lengths. */
14147 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14148 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14149 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14150 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14151 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14152 /* If not scalar, fall back to neon_dyadic_long.
14153 Vector types as above, scalar types S16 S32 U16 U32. */
14154 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14155 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14156 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14157 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14158 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14159 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14160 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14161 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14162 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14163 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14164 /* Saturating doubling multiplies. Types S16 S32. */
14165 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14166 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14167 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14168 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14169 S16 S32 U16 U32. */
14170 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14171
14172 /* Extract. Size 8. */
14173 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14174 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14175
14176 /* Two registers, miscellaneous. */
14177 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14178 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14179 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14180 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14181 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14182 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14183 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14184 /* Vector replicate. Sizes 8 16 32. */
14185 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14186 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14187 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14188 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14189 /* VMOVN. Types I16 I32 I64. */
14190 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14191 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14192 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14193 /* VQMOVUN. Types S16 S32 S64. */
14194 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14195 /* VZIP / VUZP. Sizes 8 16 32. */
14196 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14197 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14198 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14199 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14200 /* VQABS / VQNEG. Types S8 S16 S32. */
14201 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14202 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14203 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14204 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14205 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14206 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14207 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14208 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14209 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14210 /* Reciprocal estimates. Types U32 F32. */
14211 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14212 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14213 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14214 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14215 /* VCLS. Types S8 S16 S32. */
14216 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14217 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14218 /* VCLZ. Types I8 I16 I32. */
14219 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14220 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14221 /* VCNT. Size 8. */
14222 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14223 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14224 /* Two address, untyped. */
14225 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14226 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14227 /* VTRN. Sizes 8 16 32. */
14228 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14229 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14230
14231 /* Table lookup. Size 8. */
14232 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14233 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14234
14235 #undef THUMB_VARIANT
14236 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14237 #undef ARM_VARIANT
14238 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14239
14240 /* Load/store instructions. Available in Neon or VFPv3. */
14241 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14242 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14243 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14244 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14245 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14246 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14247 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14248 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14249
14250 /* Neon element/structure load/store. */
14251 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14252 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14253 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14254 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14255 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14256 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14257 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14258 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14259
14260 #undef THUMB_VARIANT
14261 #define THUMB_VARIANT &fpu_vfp_ext_v3
14262 #undef ARM_VARIANT
14263 #define ARM_VARIANT &fpu_vfp_ext_v3
14264
14265 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14266 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14267 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14268 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14269 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14270 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14271 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14272 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14273 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14274 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14275 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14276 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14277 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14278 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14279 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14280 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14281 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14282 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14283
14284 #undef THUMB_VARIANT
14285 #undef ARM_VARIANT
14286 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14287 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14288 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14289 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14290 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14291 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14292 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14293 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14294 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14295
14296 #undef ARM_VARIANT
14297 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14298 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14299 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14300 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14301 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14302 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14303 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14304 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14305 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14306 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14307 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14308 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14309 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14310 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14311 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14312 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14313 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14314 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14315 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14316 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14317 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14318 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14319 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14320 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14321 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14322 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14323 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14324 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14325 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14326 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14327 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14328 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14329 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14330 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14331 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14332 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14333 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14334 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14335 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14336 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14337 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14338 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14339 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14340 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14341 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14342 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14343 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14344 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14345 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14346 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14347 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14348 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14349 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14350 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14351 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14352 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14353 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14354 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14355 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14356 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14357 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14358 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14359 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14360 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14361 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14362 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14363 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14364 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14365 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14366 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14367 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14368 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14369 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14370 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14371 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14372 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14373 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14374 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14375 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14376 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14377 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14378 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14379 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14380 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14381 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14382 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14383 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14384 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14385 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14386 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14387 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14388 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14389 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14390 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14391 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14392 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14393 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14394 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14395 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14396 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14397 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14398 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14399 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14400 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14401 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14402 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14403 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14404 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14405 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14406 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14407 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14408 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14409 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14410 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14411 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14412 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14413 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14414 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14415 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14416 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14417 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14418 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14419 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14420 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14421 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14422 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14423 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14424 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14425 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14426 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14427 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14428 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14429 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14430 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14431 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14432 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14433 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14434 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14435 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14436 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14437 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14438 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14439 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14440 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14441 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14442 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14443 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14444 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14445 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14446 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14447 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14448 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14449 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14450 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14451 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14452 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14453 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14454 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14455 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14456 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14457 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14458 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14459 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14460
14461 #undef ARM_VARIANT
14462 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14463 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14464 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14465 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14466 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14467 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14468 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14469 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14470 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14471 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14472 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14473 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14474 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14475 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14476 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14477 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14478 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14479 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14480 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14481 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14482 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14483 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14484 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14485 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14486 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14487 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14488 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14489 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14490 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14491 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14492 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14493 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14494 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14495 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14496 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14497 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14498 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14499 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14500 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14501 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14502 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14503 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14504 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14505 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14506 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14507 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14508 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14509 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14510 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14511 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14512 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14513 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14514 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14515 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14516 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14517 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14518 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14519 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14520 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14521 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14522 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14523 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14524 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14525 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14526 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14527 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14528 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14529 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14530 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14531 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14532 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14533 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14534 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14535 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14536 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14537 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14538 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14539 };
14540 #undef ARM_VARIANT
14541 #undef THUMB_VARIANT
14542 #undef TCE
14543 #undef TCM
14544 #undef TUE
14545 #undef TUF
14546 #undef TCC
14547 #undef cCE
14548 #undef cCL
14549 #undef C3E
14550 #undef CE
14551 #undef CM
14552 #undef UE
14553 #undef UF
14554 #undef UT
14555 #undef NUF
14556 #undef nUF
14557 #undef NCE
14558 #undef nCE
14559 #undef OPS0
14560 #undef OPS1
14561 #undef OPS2
14562 #undef OPS3
14563 #undef OPS4
14564 #undef OPS5
14565 #undef OPS6
14566 #undef do_0
14567 \f
14568 /* MD interface: bits in the object file. */
14569
14570 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14571 for use in the a.out file, and stores them in the array pointed to by buf.
14572 This knows about the endian-ness of the target machine and does
14573 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
14574 2 (short) and 4 (long) Floating numbers are put out as a series of
14575 LITTLENUMS (shorts, here at least). */
14576
14577 void
14578 md_number_to_chars (char * buf, valueT val, int n)
14579 {
14580 if (target_big_endian)
14581 number_to_chars_bigendian (buf, val, n);
14582 else
14583 number_to_chars_littleendian (buf, val, n);
14584 }
14585
14586 static valueT
14587 md_chars_to_number (char * buf, int n)
14588 {
14589 valueT result = 0;
14590 unsigned char * where = (unsigned char *) buf;
14591
14592 if (target_big_endian)
14593 {
14594 while (n--)
14595 {
14596 result <<= 8;
14597 result |= (*where++ & 255);
14598 }
14599 }
14600 else
14601 {
14602 while (n--)
14603 {
14604 result <<= 8;
14605 result |= (where[n] & 255);
14606 }
14607 }
14608
14609 return result;
14610 }
14611
14612 /* MD interface: Sections. */
14613
14614 /* Estimate the size of a frag before relaxing. Assume everything fits in
14615 2 bytes. */
14616
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start every relaxable insn at the 2-byte (narrow) encoding;
     arm_relax_frag will grow it to 4 bytes later if required.  */
  fragp->fr_var = 2;
  return 2;
}
14624
14625 /* Convert a machine dependent frag. */
14626
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  /* Finalize a relaxed Thumb frag: fr_var is 2 for the 16-bit encoding
     or 4 for the 32-bit encoding chosen during relaxation.  For the
     wide form we rewrite the instruction in place; in both cases we
     emit a fixup so the immediate/offset is filled in later.  */
  unsigned long insn;
  unsigned long old_op;	/* The original 16-bit (narrow) opcode.  */
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;		/* Relaxation subtype (T_MNEM_* value).  */

  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup from the frag's symbol/offset.  */
  if (fragp->fr_symbol) {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
  } else {
      exp.X_op = O_constant;
  }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* Loads and stores: copy the register fields of the narrow insn
	 into the wide encoding.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* NOTE(review): the 4/9 top-nibble test appears to distinguish
	     the narrow forms whose only register is in bits 8-10 from
	     those with Rt in bits 0-2 and Rn in bits 3-5 — confirm
	     against the Thumb encodings.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register over from the narrow insn.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form's offset is relative to PC + 4.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place the register in a different field than
	     cmp/cmn in the wide encoding.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch; offset is supplied by the relocation.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      /* Conditional branch: the condition field moves from bits 8-11
	 of the narrow insn to bits 22-25 of the wide one.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw. */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      /* Three-operand add/sub immediate: copy both Rd and Rn fields.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* Unknown relaxation subtype: internal error.  */
      abort();
    }
  /* Emit the fixup that will supply the actual immediate/offset, and
     account for the bytes this frag now occupies.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
14784
14785 /* Return the size of a relaxable immediate operand instruction.
14786 SHIFT and SIZE specify the form of the allowable immediate. */
14787 static int
14788 relax_immediate (fragS *fragp, int size, int shift)
14789 {
14790 offsetT offset;
14791 offsetT mask;
14792 offsetT low;
14793
14794 /* ??? Should be able to do better than this. */
14795 if (fragp->fr_symbol)
14796 return 4;
14797
14798 low = (1 << shift) - 1;
14799 mask = (1 << (shift + size)) - (1 << shift);
14800 offset = fragp->fr_offset;
14801 /* Force misaligned offsets to 32-bit variant. */
14802 if (offset & low)
14803 return -4;
14804 if (offset & ~mask)
14805 return 4;
14806 return 2;
14807 }
14808
14809 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14810 load. */
14811 static int
14812 relax_adr (fragS *fragp, asection *sec)
14813 {
14814 addressT addr;
14815 offsetT val;
14816
14817 /* Assume worst case for symbols not known to be in the same section. */
14818 if (!S_IS_DEFINED(fragp->fr_symbol)
14819 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14820 return 4;
14821
14822 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14823 addr = fragp->fr_address + fragp->fr_fix;
14824 addr = (addr + 4) & ~3;
14825 /* Fix the insn as the 4-byte version if the target address is not
14826 sufficiently aligned. This is prevents an infinite loop when two
14827 instructions have contradictory range/alignment requirements. */
14828 if (val & 3)
14829 return -4;
14830 val -= addr;
14831 if (val < 0 || val > 1020)
14832 return 4;
14833 return 2;
14834 }
14835
14836 /* Return the size of a relaxable add/sub immediate instruction. */
14837 static int
14838 relax_addsub (fragS *fragp, asection *sec)
14839 {
14840 char *buf;
14841 int op;
14842
14843 buf = fragp->fr_literal + fragp->fr_fix;
14844 op = bfd_get_16(sec->owner, buf);
14845 if ((op & 0xf) == ((op >> 4) & 0xf))
14846 return relax_immediate (fragp, 8, 0);
14847 else
14848 return relax_immediate (fragp, 3, 0);
14849 }
14850
14851
14852 /* Return the size of a relaxable branch instruction. BITS is the
14853 size of the offset field in the narrow instruction. */
14854
14855 static int
14856 relax_branch (fragS *fragp, asection *sec, int bits)
14857 {
14858 addressT addr;
14859 offsetT val;
14860 offsetT limit;
14861
14862 /* Assume worst case for symbols not known to be in the same section. */
14863 if (!S_IS_DEFINED(fragp->fr_symbol)
14864 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14865 return 4;
14866
14867 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14868 addr = fragp->fr_address + fragp->fr_fix + 4;
14869 val -= addr;
14870
14871 /* Offset is a signed value *2 */
14872 limit = 1 << bits;
14873 if (val >= limit || val < -limit)
14874 return 4;
14875 return 2;
14876 }
14877
14878
14879 /* Relax a machine dependent frag. This returns the amount by which
14880 the current size of the frag should change. */
14881
14882 int
14883 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
14884 {
14885 int oldsize;
14886 int newsize;
14887
14888 oldsize = fragp->fr_var;
14889 switch (fragp->fr_subtype)
14890 {
14891 case T_MNEM_ldr_pc2:
14892 newsize = relax_adr(fragp, sec);
14893 break;
14894 case T_MNEM_ldr_pc:
14895 case T_MNEM_ldr_sp:
14896 case T_MNEM_str_sp:
14897 newsize = relax_immediate(fragp, 8, 2);
14898 break;
14899 case T_MNEM_ldr:
14900 case T_MNEM_str:
14901 newsize = relax_immediate(fragp, 5, 2);
14902 break;
14903 case T_MNEM_ldrh:
14904 case T_MNEM_strh:
14905 newsize = relax_immediate(fragp, 5, 1);
14906 break;
14907 case T_MNEM_ldrb:
14908 case T_MNEM_strb:
14909 newsize = relax_immediate(fragp, 5, 0);
14910 break;
14911 case T_MNEM_adr:
14912 newsize = relax_adr(fragp, sec);
14913 break;
14914 case T_MNEM_mov:
14915 case T_MNEM_movs:
14916 case T_MNEM_cmp:
14917 case T_MNEM_cmn:
14918 newsize = relax_immediate(fragp, 8, 0);
14919 break;
14920 case T_MNEM_b:
14921 newsize = relax_branch(fragp, sec, 11);
14922 break;
14923 case T_MNEM_bcond:
14924 newsize = relax_branch(fragp, sec, 8);
14925 break;
14926 case T_MNEM_add_sp:
14927 case T_MNEM_add_pc:
14928 newsize = relax_immediate (fragp, 8, 2);
14929 break;
14930 case T_MNEM_inc_sp:
14931 case T_MNEM_dec_sp:
14932 newsize = relax_immediate (fragp, 7, 2);
14933 break;
14934 case T_MNEM_addi:
14935 case T_MNEM_addis:
14936 case T_MNEM_subi:
14937 case T_MNEM_subis:
14938 newsize = relax_addsub (fragp, sec);
14939 break;
14940 default:
14941 abort();
14942 }
14943 if (newsize < 0)
14944 {
14945 fragp->fr_var = -newsize;
14946 md_convert_frag (sec->owner, sec, fragp);
14947 frag_wane(fragp);
14948 return -(newsize + oldsize);
14949 }
14950 fragp->fr_var = newsize;
14951 return newsize - oldsize;
14952 }
14953
14954 /* Round up a section size to the appropriate boundary. */
14955
valueT
md_section_align (segT	 segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#ifdef OBJ_ELF
  /* ELF imposes no extra alignment on section sizes.  */
  return size;
#else
  /* Round all sects to multiple of 4.  */
  return (size + 3) & ~3;
#endif
}
14967
14968 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
14969 of an rs_align_code fragment. */
14970
14971 void
14972 arm_handle_align (fragS * fragP)
14973 {
14974 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
14975 static char const thumb_noop[2] = { 0xc0, 0x46 };
14976 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
14977 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
14978
14979 int bytes, fix, noop_size;
14980 char * p;
14981 const char * noop;
14982
14983 if (fragP->fr_type != rs_align_code)
14984 return;
14985
14986 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
14987 p = fragP->fr_literal + fragP->fr_fix;
14988 fix = 0;
14989
14990 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
14991 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
14992
14993 if (fragP->tc_frag_data)
14994 {
14995 if (target_big_endian)
14996 noop = thumb_bigend_noop;
14997 else
14998 noop = thumb_noop;
14999 noop_size = sizeof (thumb_noop);
15000 }
15001 else
15002 {
15003 if (target_big_endian)
15004 noop = arm_bigend_noop;
15005 else
15006 noop = arm_noop;
15007 noop_size = sizeof (arm_noop);
15008 }
15009
15010 if (bytes & (noop_size - 1))
15011 {
15012 fix = bytes & (noop_size - 1);
15013 memset (p, 0, fix);
15014 p += fix;
15015 bytes -= fix;
15016 }
15017
15018 while (bytes >= noop_size)
15019 {
15020 memcpy (p, noop, noop_size);
15021 p += noop_size;
15022 bytes -= noop_size;
15023 fix += noop_size;
15024 }
15025
15026 fragP->fr_fix += fix;
15027 fragP->fr_var = noop_size;
15028 }
15029
15030 /* Called from md_do_align. Used to create an alignment
15031 frag in a code section. */
15032
15033 void
15034 arm_frag_align_code (int n, int max)
15035 {
15036 char * p;
15037
15038 /* We assume that there will never be a requirement
15039 to support alignments greater than 32 bytes. */
15040 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15041 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15042
15043 p = frag_var (rs_align_code,
15044 MAX_MEM_FOR_RS_ALIGN_CODE,
15045 1,
15046 (relax_substateT) max,
15047 (symbolS *) NULL,
15048 (offsetT) n,
15049 (char *) NULL);
15050 *p = 0;
15051 }
15052
15053 /* Perform target specific initialisation of a frag. */
15054
void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  Read back
     later by arm_handle_align to choose the right NOP padding.  */
  fragP->tc_frag_data = thumb_mode;
}
15061
15062 #ifdef OBJ_ELF
15063 /* When we change sections we need to issue a new mapping symbol. */
15064
15065 void
15066 arm_elf_change_section (void)
15067 {
15068 flagword flags;
15069 segment_info_type *seginfo;
15070
15071 /* Link an unlinked unwind index table section to the .text section. */
15072 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15073 && elf_linked_to_section (now_seg) == NULL)
15074 elf_linked_to_section (now_seg) = text_section;
15075
15076 if (!SEG_NORMAL (now_seg))
15077 return;
15078
15079 flags = bfd_get_section_flags (stdoutput, now_seg);
15080
15081 /* We can ignore sections that only contain debug info. */
15082 if ((flags & SEC_ALLOC) == 0)
15083 return;
15084
15085 seginfo = seg_info (now_seg);
15086 mapstate = seginfo->tc_segment_info_data.mapstate;
15087 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15088 }
15089
15090 int
15091 arm_elf_section_type (const char * str, size_t len)
15092 {
15093 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15094 return SHT_ARM_EXIDX;
15095
15096 return -1;
15097 }
15098 \f
15099 /* Code to deal with unwinding tables. */
15100
15101 static void add_unwind_adjustsp (offsetT);
15102
/* Generate any deferred unwind frame offset.  */
15104
15105 static void
15106 flush_pending_unwind (void)
15107 {
15108 offsetT offset;
15109
15110 offset = unwind.pending_offset;
15111 unwind.pending_offset = 0;
15112 if (offset != 0)
15113 add_unwind_adjustsp (offset);
15114 }
15115
15116 /* Add an opcode to this list for this function. Two-byte opcodes should
15117 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15118 order. */
15119
15120 static void
15121 add_unwind_opcode (valueT op, int length)
15122 {
15123 /* Add any deferred stack adjustment. */
15124 if (unwind.pending_offset)
15125 flush_pending_unwind ();
15126
15127 unwind.sp_restored = 0;
15128
15129 if (unwind.opcode_count + length > unwind.opcode_alloc)
15130 {
15131 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15132 if (unwind.opcodes)
15133 unwind.opcodes = xrealloc (unwind.opcodes,
15134 unwind.opcode_alloc);
15135 else
15136 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15137 }
15138 while (length > 0)
15139 {
15140 length--;
15141 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15142 op >>= 8;
15143 unwind.opcode_count++;
15144 }
15145 }
15146
15147 /* Add unwind opcodes to adjust the stack pointer. */
15148
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128. */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order. */
      /* The uleb128 operand is (offset - 0x204) / 4; offsets are
	 always word-multiples with a built-in bias of 0x204.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset. */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes are pushed in reverse so the
	 final (reversed) list reads 0xb2 followed by the uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: 0x3f (maximum short increment) followed by
	 another short opcode covering the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f encodes sp += (op << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit as many maximal 0x7f decrements as
	 needed, then one opcode (0x40-0x7f) for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
15208
15209 /* Finish the list of unwind opcodes for this function. */
15210 static void
15211 finish_unwind_opcodes (void)
15212 {
15213 valueT op;
15214
15215 if (unwind.fp_used)
15216 {
15217 /* Adjust sp as necessary. */
15218 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15219 flush_pending_unwind ();
15220
15221 /* After restoring sp from the frame pointer. */
15222 op = 0x90 | unwind.fp_reg;
15223 add_unwind_opcode (op, 1);
15224 }
15225 else
15226 flush_pending_unwind ();
15227 }
15228
15229
15230 /* Start an exception table entry. If idx is nonzero this is an index table
15231 entry. */
15232
15233 static void
15234 start_unwind_section (const segT text_seg, int idx)
15235 {
15236 const char * text_name;
15237 const char * prefix;
15238 const char * prefix_once;
15239 const char * group_name;
15240 size_t prefix_len;
15241 size_t text_len;
15242 char * sec_name;
15243 size_t sec_name_len;
15244 int type;
15245 int flags;
15246 int linkonce;
15247
15248 if (idx)
15249 {
15250 prefix = ELF_STRING_ARM_unwind;
15251 prefix_once = ELF_STRING_ARM_unwind_once;
15252 type = SHT_ARM_EXIDX;
15253 }
15254 else
15255 {
15256 prefix = ELF_STRING_ARM_unwind_info;
15257 prefix_once = ELF_STRING_ARM_unwind_info_once;
15258 type = SHT_PROGBITS;
15259 }
15260
15261 text_name = segment_name (text_seg);
15262 if (streq (text_name, ".text"))
15263 text_name = "";
15264
15265 if (strncmp (text_name, ".gnu.linkonce.t.",
15266 strlen (".gnu.linkonce.t.")) == 0)
15267 {
15268 prefix = prefix_once;
15269 text_name += strlen (".gnu.linkonce.t.");
15270 }
15271
15272 prefix_len = strlen (prefix);
15273 text_len = strlen (text_name);
15274 sec_name_len = prefix_len + text_len;
15275 sec_name = xmalloc (sec_name_len + 1);
15276 memcpy (sec_name, prefix, prefix_len);
15277 memcpy (sec_name + prefix_len, text_name, text_len);
15278 sec_name[prefix_len + text_len] = '\0';
15279
15280 flags = SHF_ALLOC;
15281 linkonce = 0;
15282 group_name = 0;
15283
15284 /* Handle COMDAT group. */
15285 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15286 {
15287 group_name = elf_group_name (text_seg);
15288 if (group_name == NULL)
15289 {
15290 as_bad ("Group section `%s' has no group signature",
15291 segment_name (text_seg));
15292 ignore_rest_of_line ();
15293 return;
15294 }
15295 flags |= SHF_GROUP;
15296 linkonce = 1;
15297 }
15298
15299 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15300
15301 /* Set the setion link for index tables. */
15302 if (idx)
15303 elf_linked_to_section (now_seg) = text_seg;
15304 }
15305
15306
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
15310
15311 static valueT
15312 create_unwind_entry (int have_data)
15313 {
15314 int size;
15315 addressT where;
15316 char *ptr;
15317 /* The current word of data. */
15318 valueT data;
15319 /* The number of bytes left in this word. */
15320 int n;
15321
15322 finish_unwind_opcodes ();
15323
15324 /* Remember the current text section. */
15325 unwind.saved_seg = now_seg;
15326 unwind.saved_subseg = now_subseg;
15327
15328 start_unwind_section (now_seg, 0);
15329
15330 if (unwind.personality_routine == NULL)
15331 {
15332 if (unwind.personality_index == -2)
15333 {
15334 if (have_data)
15335 as_bad (_("handerdata in cantunwind frame"));
15336 return 1; /* EXIDX_CANTUNWIND. */
15337 }
15338
15339 /* Use a default personality routine if none is specified. */
15340 if (unwind.personality_index == -1)
15341 {
15342 if (unwind.opcode_count > 3)
15343 unwind.personality_index = 1;
15344 else
15345 unwind.personality_index = 0;
15346 }
15347
15348 /* Space for the personality routine entry. */
15349 if (unwind.personality_index == 0)
15350 {
15351 if (unwind.opcode_count > 3)
15352 as_bad (_("too many unwind opcodes for personality routine 0"));
15353
15354 if (!have_data)
15355 {
15356 /* All the data is inline in the index table. */
15357 data = 0x80;
15358 n = 3;
15359 while (unwind.opcode_count > 0)
15360 {
15361 unwind.opcode_count--;
15362 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15363 n--;
15364 }
15365
15366 /* Pad with "finish" opcodes. */
15367 while (n--)
15368 data = (data << 8) | 0xb0;
15369
15370 return data;
15371 }
15372 size = 0;
15373 }
15374 else
15375 /* We get two opcodes "free" in the first word. */
15376 size = unwind.opcode_count - 2;
15377 }
15378 else
15379 /* An extra byte is required for the opcode count. */
15380 size = unwind.opcode_count + 1;
15381
15382 size = (size + 3) >> 2;
15383 if (size > 0xff)
15384 as_bad (_("too many unwind opcodes"));
15385
15386 frag_align (2, 0, 0);
15387 record_alignment (now_seg, 2);
15388 unwind.table_entry = expr_build_dot ();
15389
15390 /* Allocate the table entry. */
15391 ptr = frag_more ((size << 2) + 4);
15392 where = frag_now_fix () - ((size << 2) + 4);
15393
15394 switch (unwind.personality_index)
15395 {
15396 case -1:
15397 /* ??? Should this be a PLT generating relocation? */
15398 /* Custom personality routine. */
15399 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15400 BFD_RELOC_ARM_PREL31);
15401
15402 where += 4;
15403 ptr += 4;
15404
15405 /* Set the first byte to the number of additional words. */
15406 data = size - 1;
15407 n = 3;
15408 break;
15409
15410 /* ABI defined personality routines. */
15411 case 0:
15412 /* Three opcodes bytes are packed into the first word. */
15413 data = 0x80;
15414 n = 3;
15415 break;
15416
15417 case 1:
15418 case 2:
15419 /* The size and first two opcode bytes go in the first word. */
15420 data = ((0x80 + unwind.personality_index) << 8) | size;
15421 n = 2;
15422 break;
15423
15424 default:
15425 /* Should never happen. */
15426 abort ();
15427 }
15428
15429 /* Pack the opcodes into words (MSB first), reversing the list at the same
15430 time. */
15431 while (unwind.opcode_count > 0)
15432 {
15433 if (n == 0)
15434 {
15435 md_number_to_chars (ptr, data, 4);
15436 ptr += 4;
15437 n = 4;
15438 data = 0;
15439 }
15440 unwind.opcode_count--;
15441 n--;
15442 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15443 }
15444
15445 /* Finish off the last word. */
15446 if (n < 4)
15447 {
15448 /* Pad with "finish" opcodes. */
15449 while (n--)
15450 data = (data << 8) | 0xb0;
15451
15452 md_number_to_chars (ptr, data, 4);
15453 }
15454
15455 if (!have_data)
15456 {
15457 /* Add an empty descriptor if there is no user-specified data. */
15458 ptr = frag_more (4);
15459 md_number_to_chars (ptr, 0, 4);
15460 }
15461
15462 return 0;
15463 }
15464
15465 /* Convert REGNAME to a DWARF-2 register number. */
15466
15467 int
15468 tc_arm_regname_to_dw2regnum (const char *regname)
15469 {
15470 int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN);
15471
15472 if (reg == FAIL)
15473 return -1;
15474
15475 return reg;
15476 }
15477
15478 /* Initialize the DWARF-2 unwind information for this procedure. */
15479
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
15485 #endif /* OBJ_ELF */
15486
15487
15488 /* MD interface: Symbol and relocation handling. */
15489
15490 /* Return the address within the segment that a PC-relative fixup is
15491 relative to. For ARM, PC-relative fixups applied to instructions
15492 are generally relative to the location of the fixup plus 8 bytes.
15493 Thumb branches are offset by 4, and Thumb loads relative to PC
15494 require special handling. */
15495
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     (A base of 0 leaves only the per-reloc pipeline offset below.)  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || arm_force_relocation (fixP)))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

      /* Thumb loads relative to PC: pipeline offset of 4, then the
	 result is word-aligned.  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
15564
15565 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15566 Otherwise we have no need to default values of symbols. */
15567
15568 symbolS *
15569 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15570 {
15571 #ifdef OBJ_ELF
15572 if (name[0] == '_' && name[1] == 'G'
15573 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15574 {
15575 if (!GOT_symbol)
15576 {
15577 if (symbol_find (name))
15578 as_bad ("GOT already in the symbol table");
15579
15580 GOT_symbol = symbol_new (name, undefined_section,
15581 (valueT) 0, & zero_address_frag);
15582 }
15583
15584 return GOT_symbol;
15585 }
15586 #endif
15587
15588 return 0;
15589 }
15590
15591 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15592 computed as two separate immediate values, added together. We
15593 already know that this value cannot be computed by just one ARM
15594 instruction. */
15595
15596 static unsigned int
15597 validate_immediate_twopart (unsigned int val,
15598 unsigned int * highpart)
15599 {
15600 unsigned int a;
15601 unsigned int i;
15602
15603 for (i = 0; i < 32; i += 2)
15604 if (((a = rotate_left (val, i)) & 0xff) != 0)
15605 {
15606 if (a & 0xff00)
15607 {
15608 if (a & ~ 0xffff)
15609 continue;
15610 * highpart = (a >> 8) | ((i + 24) << 7);
15611 }
15612 else if (a & 0xff0000)
15613 {
15614 if (a & 0xff000000)
15615 continue;
15616 * highpart = (a >> 16) | ((i + 16) << 7);
15617 }
15618 else
15619 {
15620 assert (a & 0xff000000);
15621 * highpart = (a >> 24) | ((i + 8) << 7);
15622 }
15623
15624 return (a & 0xff) | (i << 7);
15625 }
15626
15627 return FAIL;
15628 }
15629
15630 static int
15631 validate_offset_imm (unsigned int val, int hwse)
15632 {
15633 if ((hwse && val > 255) || val > 4095)
15634 return FAIL;
15635 return val;
15636 }
15637
15638 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15639 negative immediate constant by altering the instruction. A bit of
15640 a hack really.
15641 MOV <-> MVN
15642 AND <-> BIC
15643 ADC <-> SBC
15644 by inverting the second operand, and
15645 ADD <-> SUB
15646 CMP <-> CMN
15647 by negating the second operand. */
15648
15649 static int
15650 negate_data_op (unsigned long * instruction,
15651 unsigned long value)
15652 {
15653 int op, new_inst;
15654 unsigned long negated, inverted;
15655
15656 negated = encode_arm_immediate (-value);
15657 inverted = encode_arm_immediate (~value);
15658
15659 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15660 switch (op)
15661 {
15662 /* First negates. */
15663 case OPCODE_SUB: /* ADD <-> SUB */
15664 new_inst = OPCODE_ADD;
15665 value = negated;
15666 break;
15667
15668 case OPCODE_ADD:
15669 new_inst = OPCODE_SUB;
15670 value = negated;
15671 break;
15672
15673 case OPCODE_CMP: /* CMP <-> CMN */
15674 new_inst = OPCODE_CMN;
15675 value = negated;
15676 break;
15677
15678 case OPCODE_CMN:
15679 new_inst = OPCODE_CMP;
15680 value = negated;
15681 break;
15682
15683 /* Now Inverted ops. */
15684 case OPCODE_MOV: /* MOV <-> MVN */
15685 new_inst = OPCODE_MVN;
15686 value = inverted;
15687 break;
15688
15689 case OPCODE_MVN:
15690 new_inst = OPCODE_MOV;
15691 value = inverted;
15692 break;
15693
15694 case OPCODE_AND: /* AND <-> BIC */
15695 new_inst = OPCODE_BIC;
15696 value = inverted;
15697 break;
15698
15699 case OPCODE_BIC:
15700 new_inst = OPCODE_AND;
15701 value = inverted;
15702 break;
15703
15704 case OPCODE_ADC: /* ADC <-> SBC */
15705 new_inst = OPCODE_SBC;
15706 value = inverted;
15707 break;
15708
15709 case OPCODE_SBC:
15710 new_inst = OPCODE_ADC;
15711 value = inverted;
15712 break;
15713
15714 /* We cannot do anything. */
15715 default:
15716 return FAIL;
15717 }
15718
15719 if (value == (unsigned) FAIL)
15720 return FAIL;
15721
15722 *instruction &= OPCODE_MASK;
15723 *instruction |= new_inst << DATA_OP_SHIFT;
15724 return value;
15725 }
15726
15727 /* Like negate_data_op, but for Thumb-2. */
15728
static unsigned int
thumb32_negate_data_op (offsetT *instruction, offsetT value)
{
  int op, new_inst;
  int rd;
  offsetT negated, inverted;

  /* Candidate replacement immediates; each is FAIL if VALUE cannot be
     represented as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

    /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

    /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 means this is really TST, which cannot be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

    /* ADC <-> SBC */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

    /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == FAIL)
    return FAIL;

  /* Swap in the replacement opcode.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
15802
15803 /* Read a 32-bit thumb instruction from buf. */
15804 static unsigned long
15805 get_thumb32_insn (char * buf)
15806 {
15807 unsigned long insn;
15808 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15809 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15810
15811 return insn;
15812 }
15813
15814 void
15815 md_apply_fix (fixS * fixP,
15816 valueT * valP,
15817 segT seg)
15818 {
15819 offsetT value = * valP;
15820 offsetT newval;
15821 unsigned int newimm;
15822 unsigned long temp;
15823 int sign;
15824 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15825
15826 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15827
15828 /* Note whether this will delete the relocation. */
15829 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
15830 fixP->fx_done = 1;
15831
15832 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15833 consistency with the behavior on 32-bit hosts. Remember value
15834 for emit_reloc. */
15835 value &= 0xffffffff;
15836 value ^= 0x80000000;
15837 value -= 0x80000000;
15838
15839 *valP = value;
15840 fixP->fx_addnumber = value;
15841
15842 /* Same treatment for fixP->fx_offset. */
15843 fixP->fx_offset &= 0xffffffff;
15844 fixP->fx_offset ^= 0x80000000;
15845 fixP->fx_offset -= 0x80000000;
15846
15847 switch (fixP->fx_r_type)
15848 {
15849 case BFD_RELOC_NONE:
15850 /* This will need to go in the object file. */
15851 fixP->fx_done = 0;
15852 break;
15853
15854 case BFD_RELOC_ARM_IMMEDIATE:
15855 /* We claim that this fixup has been processed here,
15856 even if in fact we generate an error because we do
15857 not have a reloc for it, so tc_gen_reloc will reject it. */
15858 fixP->fx_done = 1;
15859
15860 if (fixP->fx_addsy
15861 && ! S_IS_DEFINED (fixP->fx_addsy))
15862 {
15863 as_bad_where (fixP->fx_file, fixP->fx_line,
15864 _("undefined symbol %s used as an immediate value"),
15865 S_GET_NAME (fixP->fx_addsy));
15866 break;
15867 }
15868
15869 newimm = encode_arm_immediate (value);
15870 temp = md_chars_to_number (buf, INSN_SIZE);
15871
15872 /* If the instruction will fail, see if we can fix things up by
15873 changing the opcode. */
15874 if (newimm == (unsigned int) FAIL
15875 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
15876 {
15877 as_bad_where (fixP->fx_file, fixP->fx_line,
15878 _("invalid constant (%lx) after fixup"),
15879 (unsigned long) value);
15880 break;
15881 }
15882
15883 newimm |= (temp & 0xfffff000);
15884 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15885 break;
15886
15887 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
15888 {
15889 unsigned int highpart = 0;
15890 unsigned int newinsn = 0xe1a00000; /* nop. */
15891
15892 newimm = encode_arm_immediate (value);
15893 temp = md_chars_to_number (buf, INSN_SIZE);
15894
15895 /* If the instruction will fail, see if we can fix things up by
15896 changing the opcode. */
15897 if (newimm == (unsigned int) FAIL
15898 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
15899 {
15900 /* No ? OK - try using two ADD instructions to generate
15901 the value. */
15902 newimm = validate_immediate_twopart (value, & highpart);
15903
15904 /* Yes - then make sure that the second instruction is
15905 also an add. */
15906 if (newimm != (unsigned int) FAIL)
15907 newinsn = temp;
15908 /* Still No ? Try using a negated value. */
15909 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
15910 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
15911 /* Otherwise - give up. */
15912 else
15913 {
15914 as_bad_where (fixP->fx_file, fixP->fx_line,
15915 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
15916 (long) value);
15917 break;
15918 }
15919
15920 /* Replace the first operand in the 2nd instruction (which
15921 is the PC) with the destination register. We have
15922 already added in the PC in the first instruction and we
15923 do not want to do it again. */
15924 newinsn &= ~ 0xf0000;
15925 newinsn |= ((newinsn & 0x0f000) << 4);
15926 }
15927
15928 newimm |= (temp & 0xfffff000);
15929 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15930
15931 highpart |= (newinsn & 0xfffff000);
15932 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
15933 }
15934 break;
15935
15936 case BFD_RELOC_ARM_OFFSET_IMM:
15937 if (!fixP->fx_done && seg->use_rela_p)
15938 value = 0;
15939
15940 case BFD_RELOC_ARM_LITERAL:
15941 sign = value >= 0;
15942
15943 if (value < 0)
15944 value = - value;
15945
15946 if (validate_offset_imm (value, 0) == FAIL)
15947 {
15948 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
15949 as_bad_where (fixP->fx_file, fixP->fx_line,
15950 _("invalid literal constant: pool needs to be closer"));
15951 else
15952 as_bad_where (fixP->fx_file, fixP->fx_line,
15953 _("bad immediate value for offset (%ld)"),
15954 (long) value);
15955 break;
15956 }
15957
15958 newval = md_chars_to_number (buf, INSN_SIZE);
15959 newval &= 0xff7ff000;
15960 newval |= value | (sign ? INDEX_UP : 0);
15961 md_number_to_chars (buf, newval, INSN_SIZE);
15962 break;
15963
15964 case BFD_RELOC_ARM_OFFSET_IMM8:
15965 case BFD_RELOC_ARM_HWLITERAL:
15966 sign = value >= 0;
15967
15968 if (value < 0)
15969 value = - value;
15970
15971 if (validate_offset_imm (value, 1) == FAIL)
15972 {
15973 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
15974 as_bad_where (fixP->fx_file, fixP->fx_line,
15975 _("invalid literal constant: pool needs to be closer"));
15976 else
15977 as_bad (_("bad immediate value for half-word offset (%ld)"),
15978 (long) value);
15979 break;
15980 }
15981
15982 newval = md_chars_to_number (buf, INSN_SIZE);
15983 newval &= 0xff7ff0f0;
15984 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
15985 md_number_to_chars (buf, newval, INSN_SIZE);
15986 break;
15987
15988 case BFD_RELOC_ARM_T32_OFFSET_U8:
15989 if (value < 0 || value > 1020 || value % 4 != 0)
15990 as_bad_where (fixP->fx_file, fixP->fx_line,
15991 _("bad immediate value for offset (%ld)"), (long) value);
15992 value /= 4;
15993
15994 newval = md_chars_to_number (buf+2, THUMB_SIZE);
15995 newval |= value;
15996 md_number_to_chars (buf+2, newval, THUMB_SIZE);
15997 break;
15998
15999 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16000 /* This is a complicated relocation used for all varieties of Thumb32
16001 load/store instruction with immediate offset:
16002
16003 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16004 *4, optional writeback(W)
16005 (doubleword load/store)
16006
16007 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16008 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16009 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16010 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16011 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16012
16013 Uppercase letters indicate bits that are already encoded at
16014 this point. Lowercase letters are our problem. For the
16015 second block of instructions, the secondary opcode nybble
16016 (bits 8..11) is present, and bit 23 is zero, even if this is
16017 a PC-relative operation. */
16018 newval = md_chars_to_number (buf, THUMB_SIZE);
16019 newval <<= 16;
16020 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16021
16022 if ((newval & 0xf0000000) == 0xe0000000)
16023 {
16024 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16025 if (value >= 0)
16026 newval |= (1 << 23);
16027 else
16028 value = -value;
16029 if (value % 4 != 0)
16030 {
16031 as_bad_where (fixP->fx_file, fixP->fx_line,
16032 _("offset not a multiple of 4"));
16033 break;
16034 }
16035 value /= 4;
16036 if (value > 0xff)
16037 {
16038 as_bad_where (fixP->fx_file, fixP->fx_line,
16039 _("offset out of range"));
16040 break;
16041 }
16042 newval &= ~0xff;
16043 }
16044 else if ((newval & 0x000f0000) == 0x000f0000)
16045 {
16046 /* PC-relative, 12-bit offset. */
16047 if (value >= 0)
16048 newval |= (1 << 23);
16049 else
16050 value = -value;
16051 if (value > 0xfff)
16052 {
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("offset out of range"));
16055 break;
16056 }
16057 newval &= ~0xfff;
16058 }
16059 else if ((newval & 0x00000100) == 0x00000100)
16060 {
16061 /* Writeback: 8-bit, +/- offset. */
16062 if (value >= 0)
16063 newval |= (1 << 9);
16064 else
16065 value = -value;
16066 if (value > 0xff)
16067 {
16068 as_bad_where (fixP->fx_file, fixP->fx_line,
16069 _("offset out of range"));
16070 break;
16071 }
16072 newval &= ~0xff;
16073 }
16074 else if ((newval & 0x00000f00) == 0x00000e00)
16075 {
16076 /* T-instruction: positive 8-bit offset. */
16077 if (value < 0 || value > 0xff)
16078 {
16079 as_bad_where (fixP->fx_file, fixP->fx_line,
16080 _("offset out of range"));
16081 break;
16082 }
16083 newval &= ~0xff;
16084 newval |= value;
16085 }
16086 else
16087 {
16088 /* Positive 12-bit or negative 8-bit offset. */
16089 int limit;
16090 if (value >= 0)
16091 {
16092 newval |= (1 << 23);
16093 limit = 0xfff;
16094 }
16095 else
16096 {
16097 value = -value;
16098 limit = 0xff;
16099 }
16100 if (value > limit)
16101 {
16102 as_bad_where (fixP->fx_file, fixP->fx_line,
16103 _("offset out of range"));
16104 break;
16105 }
16106 newval &= ~limit;
16107 }
16108
16109 newval |= value;
16110 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16111 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16112 break;
16113
16114 case BFD_RELOC_ARM_SHIFT_IMM:
16115 newval = md_chars_to_number (buf, INSN_SIZE);
16116 if (((unsigned long) value) > 32
16117 || (value == 32
16118 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16119 {
16120 as_bad_where (fixP->fx_file, fixP->fx_line,
16121 _("shift expression is too large"));
16122 break;
16123 }
16124
16125 if (value == 0)
16126 /* Shifts of zero must be done as lsl. */
16127 newval &= ~0x60;
16128 else if (value == 32)
16129 value = 0;
16130 newval &= 0xfffff07f;
16131 newval |= (value & 0x1f) << 7;
16132 md_number_to_chars (buf, newval, INSN_SIZE);
16133 break;
16134
16135 case BFD_RELOC_ARM_T32_IMMEDIATE:
16136 case BFD_RELOC_ARM_T32_IMM12:
16137 case BFD_RELOC_ARM_T32_ADD_PC12:
16138 /* We claim that this fixup has been processed here,
16139 even if in fact we generate an error because we do
16140 not have a reloc for it, so tc_gen_reloc will reject it. */
16141 fixP->fx_done = 1;
16142
16143 if (fixP->fx_addsy
16144 && ! S_IS_DEFINED (fixP->fx_addsy))
16145 {
16146 as_bad_where (fixP->fx_file, fixP->fx_line,
16147 _("undefined symbol %s used as an immediate value"),
16148 S_GET_NAME (fixP->fx_addsy));
16149 break;
16150 }
16151
16152 newval = md_chars_to_number (buf, THUMB_SIZE);
16153 newval <<= 16;
16154 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16155
16156 /* FUTURE: Implement analogue of negate_data_op for T32. */
16157 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16158 {
16159 newimm = encode_thumb32_immediate (value);
16160 if (newimm == (unsigned int) FAIL)
16161 newimm = thumb32_negate_data_op (&newval, value);
16162 }
16163 else
16164 {
16165 /* 12 bit immediate for addw/subw. */
16166 if (value < 0)
16167 {
16168 value = -value;
16169 newval ^= 0x00a00000;
16170 }
16171 if (value > 0xfff)
16172 newimm = (unsigned int) FAIL;
16173 else
16174 newimm = value;
16175 }
16176
16177 if (newimm == (unsigned int)FAIL)
16178 {
16179 as_bad_where (fixP->fx_file, fixP->fx_line,
16180 _("invalid constant (%lx) after fixup"),
16181 (unsigned long) value);
16182 break;
16183 }
16184
16185 newval |= (newimm & 0x800) << 15;
16186 newval |= (newimm & 0x700) << 4;
16187 newval |= (newimm & 0x0ff);
16188
16189 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16190 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16191 break;
16192
16193 case BFD_RELOC_ARM_SMC:
16194 if (((unsigned long) value) > 0xffff)
16195 as_bad_where (fixP->fx_file, fixP->fx_line,
16196 _("invalid smc expression"));
16197 newval = md_chars_to_number (buf, INSN_SIZE);
16198 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16199 md_number_to_chars (buf, newval, INSN_SIZE);
16200 break;
16201
16202 case BFD_RELOC_ARM_SWI:
16203 if (fixP->tc_fix_data != 0)
16204 {
16205 if (((unsigned long) value) > 0xff)
16206 as_bad_where (fixP->fx_file, fixP->fx_line,
16207 _("invalid swi expression"));
16208 newval = md_chars_to_number (buf, THUMB_SIZE);
16209 newval |= value;
16210 md_number_to_chars (buf, newval, THUMB_SIZE);
16211 }
16212 else
16213 {
16214 if (((unsigned long) value) > 0x00ffffff)
16215 as_bad_where (fixP->fx_file, fixP->fx_line,
16216 _("invalid swi expression"));
16217 newval = md_chars_to_number (buf, INSN_SIZE);
16218 newval |= value;
16219 md_number_to_chars (buf, newval, INSN_SIZE);
16220 }
16221 break;
16222
16223 case BFD_RELOC_ARM_MULTI:
16224 if (((unsigned long) value) > 0xffff)
16225 as_bad_where (fixP->fx_file, fixP->fx_line,
16226 _("invalid expression in load/store multiple"));
16227 newval = value | md_chars_to_number (buf, INSN_SIZE);
16228 md_number_to_chars (buf, newval, INSN_SIZE);
16229 break;
16230
16231 #ifdef OBJ_ELF
16232 case BFD_RELOC_ARM_PCREL_CALL:
16233 newval = md_chars_to_number (buf, INSN_SIZE);
16234 if ((newval & 0xf0000000) == 0xf0000000)
16235 temp = 1;
16236 else
16237 temp = 3;
16238 goto arm_branch_common;
16239
16240 case BFD_RELOC_ARM_PCREL_JUMP:
16241 case BFD_RELOC_ARM_PLT32:
16242 #endif
16243 case BFD_RELOC_ARM_PCREL_BRANCH:
16244 temp = 3;
16245 goto arm_branch_common;
16246
16247 case BFD_RELOC_ARM_PCREL_BLX:
16248 temp = 1;
16249 arm_branch_common:
16250 /* We are going to store value (shifted right by two) in the
16251 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16252 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16253 also be be clear. */
16254 if (value & temp)
16255 as_bad_where (fixP->fx_file, fixP->fx_line,
16256 _("misaligned branch destination"));
16257 if ((value & (offsetT)0xfe000000) != (offsetT)0
16258 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16259 as_bad_where (fixP->fx_file, fixP->fx_line,
16260 _("branch out of range"));
16261
16262 if (fixP->fx_done || !seg->use_rela_p)
16263 {
16264 newval = md_chars_to_number (buf, INSN_SIZE);
16265 newval |= (value >> 2) & 0x00ffffff;
16266 /* Set the H bit on BLX instructions. */
16267 if (temp == 1)
16268 {
16269 if (value & 2)
16270 newval |= 0x01000000;
16271 else
16272 newval &= ~0x01000000;
16273 }
16274 md_number_to_chars (buf, newval, INSN_SIZE);
16275 }
16276 break;
16277
16278 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
16279 /* CZB can only branch forward. */
16280 if (value & ~0x7e)
16281 as_bad_where (fixP->fx_file, fixP->fx_line,
16282 _("branch out of range"));
16283
16284 if (fixP->fx_done || !seg->use_rela_p)
16285 {
16286 newval = md_chars_to_number (buf, THUMB_SIZE);
16287 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16288 md_number_to_chars (buf, newval, THUMB_SIZE);
16289 }
16290 break;
16291
16292 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16293 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16294 as_bad_where (fixP->fx_file, fixP->fx_line,
16295 _("branch out of range"));
16296
16297 if (fixP->fx_done || !seg->use_rela_p)
16298 {
16299 newval = md_chars_to_number (buf, THUMB_SIZE);
16300 newval |= (value & 0x1ff) >> 1;
16301 md_number_to_chars (buf, newval, THUMB_SIZE);
16302 }
16303 break;
16304
16305 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16306 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16307 as_bad_where (fixP->fx_file, fixP->fx_line,
16308 _("branch out of range"));
16309
16310 if (fixP->fx_done || !seg->use_rela_p)
16311 {
16312 newval = md_chars_to_number (buf, THUMB_SIZE);
16313 newval |= (value & 0xfff) >> 1;
16314 md_number_to_chars (buf, newval, THUMB_SIZE);
16315 }
16316 break;
16317
16318 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16319 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16320 as_bad_where (fixP->fx_file, fixP->fx_line,
16321 _("conditional branch out of range"));
16322
16323 if (fixP->fx_done || !seg->use_rela_p)
16324 {
16325 offsetT newval2;
16326 addressT S, J1, J2, lo, hi;
16327
16328 S = (value & 0x00100000) >> 20;
16329 J2 = (value & 0x00080000) >> 19;
16330 J1 = (value & 0x00040000) >> 18;
16331 hi = (value & 0x0003f000) >> 12;
16332 lo = (value & 0x00000ffe) >> 1;
16333
16334 newval = md_chars_to_number (buf, THUMB_SIZE);
16335 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16336 newval |= (S << 10) | hi;
16337 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16338 md_number_to_chars (buf, newval, THUMB_SIZE);
16339 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16340 }
16341 break;
16342
16343 case BFD_RELOC_THUMB_PCREL_BLX:
16344 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16345 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16346 as_bad_where (fixP->fx_file, fixP->fx_line,
16347 _("branch out of range"));
16348
16349 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16350 /* For a BLX instruction, make sure that the relocation is rounded up
16351 to a word boundary. This follows the semantics of the instruction
16352 which specifies that bit 1 of the target address will come from bit
16353 1 of the base address. */
16354 value = (value + 1) & ~ 1;
16355
16356 if (fixP->fx_done || !seg->use_rela_p)
16357 {
16358 offsetT newval2;
16359
16360 newval = md_chars_to_number (buf, THUMB_SIZE);
16361 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16362 newval |= (value & 0x7fffff) >> 12;
16363 newval2 |= (value & 0xfff) >> 1;
16364 md_number_to_chars (buf, newval, THUMB_SIZE);
16365 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16366 }
16367 break;
16368
16369 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16370 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16371 as_bad_where (fixP->fx_file, fixP->fx_line,
16372 _("branch out of range"));
16373
16374 if (fixP->fx_done || !seg->use_rela_p)
16375 {
16376 offsetT newval2;
16377 addressT S, I1, I2, lo, hi;
16378
16379 S = (value & 0x01000000) >> 24;
16380 I1 = (value & 0x00800000) >> 23;
16381 I2 = (value & 0x00400000) >> 22;
16382 hi = (value & 0x003ff000) >> 12;
16383 lo = (value & 0x00000ffe) >> 1;
16384
16385 I1 = !(I1 ^ S);
16386 I2 = !(I2 ^ S);
16387
16388 newval = md_chars_to_number (buf, THUMB_SIZE);
16389 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16390 newval |= (S << 10) | hi;
16391 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16392 md_number_to_chars (buf, newval, THUMB_SIZE);
16393 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16394 }
16395 break;
16396
16397 case BFD_RELOC_8:
16398 if (fixP->fx_done || !seg->use_rela_p)
16399 md_number_to_chars (buf, value, 1);
16400 break;
16401
16402 case BFD_RELOC_16:
16403 if (fixP->fx_done || !seg->use_rela_p)
16404 md_number_to_chars (buf, value, 2);
16405 break;
16406
16407 #ifdef OBJ_ELF
16408 case BFD_RELOC_ARM_TLS_GD32:
16409 case BFD_RELOC_ARM_TLS_LE32:
16410 case BFD_RELOC_ARM_TLS_IE32:
16411 case BFD_RELOC_ARM_TLS_LDM32:
16412 case BFD_RELOC_ARM_TLS_LDO32:
16413 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16414 /* fall through */
16415
16416 case BFD_RELOC_ARM_GOT32:
16417 case BFD_RELOC_ARM_GOTOFF:
16418 case BFD_RELOC_ARM_TARGET2:
16419 if (fixP->fx_done || !seg->use_rela_p)
16420 md_number_to_chars (buf, 0, 4);
16421 break;
16422 #endif
16423
16424 case BFD_RELOC_RVA:
16425 case BFD_RELOC_32:
16426 case BFD_RELOC_ARM_TARGET1:
16427 case BFD_RELOC_ARM_ROSEGREL32:
16428 case BFD_RELOC_ARM_SBREL32:
16429 case BFD_RELOC_32_PCREL:
16430 if (fixP->fx_done || !seg->use_rela_p)
16431 md_number_to_chars (buf, value, 4);
16432 break;
16433
16434 #ifdef OBJ_ELF
16435 case BFD_RELOC_ARM_PREL31:
16436 if (fixP->fx_done || !seg->use_rela_p)
16437 {
16438 newval = md_chars_to_number (buf, 4) & 0x80000000;
16439 if ((value ^ (value >> 1)) & 0x40000000)
16440 {
16441 as_bad_where (fixP->fx_file, fixP->fx_line,
16442 _("rel31 relocation overflow"));
16443 }
16444 newval |= value & 0x7fffffff;
16445 md_number_to_chars (buf, newval, 4);
16446 }
16447 break;
16448 #endif
16449
16450 case BFD_RELOC_ARM_CP_OFF_IMM:
16451 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16452 if (value < -1023 || value > 1023 || (value & 3))
16453 as_bad_where (fixP->fx_file, fixP->fx_line,
16454 _("co-processor offset out of range"));
16455 cp_off_common:
16456 sign = value >= 0;
16457 if (value < 0)
16458 value = -value;
16459 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16460 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16461 newval = md_chars_to_number (buf, INSN_SIZE);
16462 else
16463 newval = get_thumb32_insn (buf);
16464 newval &= 0xff7fff00;
16465 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16466 if (value == 0)
16467 newval &= ~WRITE_BACK;
16468 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16469 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16470 md_number_to_chars (buf, newval, INSN_SIZE);
16471 else
16472 put_thumb32_insn (buf, newval);
16473 break;
16474
16475 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16476 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16477 if (value < -255 || value > 255)
16478 as_bad_where (fixP->fx_file, fixP->fx_line,
16479 _("co-processor offset out of range"));
16480 goto cp_off_common;
16481
16482 case BFD_RELOC_ARM_THUMB_OFFSET:
16483 newval = md_chars_to_number (buf, THUMB_SIZE);
16484 /* Exactly what ranges, and where the offset is inserted depends
16485 on the type of instruction, we can establish this from the
16486 top 4 bits. */
16487 switch (newval >> 12)
16488 {
16489 case 4: /* PC load. */
16490 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16491 forced to zero for these loads; md_pcrel_from has already
16492 compensated for this. */
16493 if (value & 3)
16494 as_bad_where (fixP->fx_file, fixP->fx_line,
16495 _("invalid offset, target not word aligned (0x%08lX)"),
16496 (((unsigned long) fixP->fx_frag->fr_address
16497 + (unsigned long) fixP->fx_where) & ~3)
16498 + (unsigned long) value);
16499
16500 if (value & ~0x3fc)
16501 as_bad_where (fixP->fx_file, fixP->fx_line,
16502 _("invalid offset, value too big (0x%08lX)"),
16503 (long) value);
16504
16505 newval |= value >> 2;
16506 break;
16507
16508 case 9: /* SP load/store. */
16509 if (value & ~0x3fc)
16510 as_bad_where (fixP->fx_file, fixP->fx_line,
16511 _("invalid offset, value too big (0x%08lX)"),
16512 (long) value);
16513 newval |= value >> 2;
16514 break;
16515
16516 case 6: /* Word load/store. */
16517 if (value & ~0x7c)
16518 as_bad_where (fixP->fx_file, fixP->fx_line,
16519 _("invalid offset, value too big (0x%08lX)"),
16520 (long) value);
16521 newval |= value << 4; /* 6 - 2. */
16522 break;
16523
16524 case 7: /* Byte load/store. */
16525 if (value & ~0x1f)
16526 as_bad_where (fixP->fx_file, fixP->fx_line,
16527 _("invalid offset, value too big (0x%08lX)"),
16528 (long) value);
16529 newval |= value << 6;
16530 break;
16531
16532 case 8: /* Halfword load/store. */
16533 if (value & ~0x3e)
16534 as_bad_where (fixP->fx_file, fixP->fx_line,
16535 _("invalid offset, value too big (0x%08lX)"),
16536 (long) value);
16537 newval |= value << 5; /* 6 - 1. */
16538 break;
16539
16540 default:
16541 as_bad_where (fixP->fx_file, fixP->fx_line,
16542 "Unable to process relocation for thumb opcode: %lx",
16543 (unsigned long) newval);
16544 break;
16545 }
16546 md_number_to_chars (buf, newval, THUMB_SIZE);
16547 break;
16548
16549 case BFD_RELOC_ARM_THUMB_ADD:
16550 /* This is a complicated relocation, since we use it for all of
16551 the following immediate relocations:
16552
16553 3bit ADD/SUB
16554 8bit ADD/SUB
16555 9bit ADD/SUB SP word-aligned
16556 10bit ADD PC/SP word-aligned
16557
16558 The type of instruction being processed is encoded in the
16559 instruction field:
16560
16561 0x8000 SUB
16562 0x00F0 Rd
16563 0x000F Rs
16564 */
16565 newval = md_chars_to_number (buf, THUMB_SIZE);
16566 {
16567 int rd = (newval >> 4) & 0xf;
16568 int rs = newval & 0xf;
16569 int subtract = !!(newval & 0x8000);
16570
16571 /* Check for HI regs, only very restricted cases allowed:
16572 Adjusting SP, and using PC or SP to get an address. */
16573 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16574 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16575 as_bad_where (fixP->fx_file, fixP->fx_line,
16576 _("invalid Hi register with immediate"));
16577
16578 /* If value is negative, choose the opposite instruction. */
16579 if (value < 0)
16580 {
16581 value = -value;
16582 subtract = !subtract;
16583 if (value < 0)
16584 as_bad_where (fixP->fx_file, fixP->fx_line,
16585 _("immediate value out of range"));
16586 }
16587
16588 if (rd == REG_SP)
16589 {
16590 if (value & ~0x1fc)
16591 as_bad_where (fixP->fx_file, fixP->fx_line,
16592 _("invalid immediate for stack address calculation"));
16593 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16594 newval |= value >> 2;
16595 }
16596 else if (rs == REG_PC || rs == REG_SP)
16597 {
16598 if (subtract || value & ~0x3fc)
16599 as_bad_where (fixP->fx_file, fixP->fx_line,
16600 _("invalid immediate for address calculation (value = 0x%08lX)"),
16601 (unsigned long) value);
16602 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16603 newval |= rd << 8;
16604 newval |= value >> 2;
16605 }
16606 else if (rs == rd)
16607 {
16608 if (value & ~0xff)
16609 as_bad_where (fixP->fx_file, fixP->fx_line,
16610 _("immediate value out of range"));
16611 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16612 newval |= (rd << 8) | value;
16613 }
16614 else
16615 {
16616 if (value & ~0x7)
16617 as_bad_where (fixP->fx_file, fixP->fx_line,
16618 _("immediate value out of range"));
16619 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16620 newval |= rd | (rs << 3) | (value << 6);
16621 }
16622 }
16623 md_number_to_chars (buf, newval, THUMB_SIZE);
16624 break;
16625
16626 case BFD_RELOC_ARM_THUMB_IMM:
16627 newval = md_chars_to_number (buf, THUMB_SIZE);
16628 if (value < 0 || value > 255)
16629 as_bad_where (fixP->fx_file, fixP->fx_line,
16630 _("invalid immediate: %ld is too large"),
16631 (long) value);
16632 newval |= value;
16633 md_number_to_chars (buf, newval, THUMB_SIZE);
16634 break;
16635
16636 case BFD_RELOC_ARM_THUMB_SHIFT:
16637 /* 5bit shift value (0..32). LSL cannot take 32. */
16638 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16639 temp = newval & 0xf800;
16640 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16641 as_bad_where (fixP->fx_file, fixP->fx_line,
16642 _("invalid shift value: %ld"), (long) value);
16643 /* Shifts of zero must be encoded as LSL. */
16644 if (value == 0)
16645 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16646 /* Shifts of 32 are encoded as zero. */
16647 else if (value == 32)
16648 value = 0;
16649 newval |= value << 6;
16650 md_number_to_chars (buf, newval, THUMB_SIZE);
16651 break;
16652
16653 case BFD_RELOC_VTABLE_INHERIT:
16654 case BFD_RELOC_VTABLE_ENTRY:
16655 fixP->fx_done = 0;
16656 return;
16657
16658 case BFD_RELOC_UNUSED:
16659 default:
16660 as_bad_where (fixP->fx_file, fixP->fx_line,
16661 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16662 }
16663 }
16664
/* Translate the internal (fixS) representation of relocation info to
   the BFD target format (an arelent).  Returns a freshly allocated
   arelent, or NULL after reporting an error for relocations that
   cannot be represented in the output object format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For RELA targets fold the PC-relative adjustment into the offset;
     for REL targets the offset becomes the reloc's own address.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through -- a non-PC-relative BFD_RELOC_8 is passed on
	 unchanged by the catch-all list below.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types need no translation; emit them as-is.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal reloc; should have been resolved by md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* Only representable on RELA targets; otherwise it is an
	 internal reloc that should already have been resolved.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      /* Any other internal reloc type cannot be emitted; name it in
	 the diagnostic where we can.  */
      {
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A data reloc against the magic _GLOBAL_OFFSET_TABLE_ symbol
     becomes a GOTPC reloc relative to this location.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
16848
16849 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16850
16851 void
16852 cons_fix_new_arm (fragS * frag,
16853 int where,
16854 int size,
16855 expressionS * exp)
16856 {
16857 bfd_reloc_code_real_type type;
16858 int pcrel = 0;
16859
16860 /* Pick a reloc.
16861 FIXME: @@ Should look at CPU word size. */
16862 switch (size)
16863 {
16864 case 1:
16865 type = BFD_RELOC_8;
16866 break;
16867 case 2:
16868 type = BFD_RELOC_16;
16869 break;
16870 case 4:
16871 default:
16872 type = BFD_RELOC_32;
16873 break;
16874 case 8:
16875 type = BFD_RELOC_64;
16876 break;
16877 }
16878
16879 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
16880 }
16881
16882 #if defined OBJ_COFF || defined OBJ_ELF
16883 void
16884 arm_validate_fix (fixS * fixP)
16885 {
16886 /* If the destination of the branch is a defined symbol which does not have
16887 the THUMB_FUNC attribute, then we must be calling a function which has
16888 the (interfacearm) attribute. We look for the Thumb entry point to that
16889 function and change the branch to refer to that function instead. */
16890 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
16891 && fixP->fx_addsy != NULL
16892 && S_IS_DEFINED (fixP->fx_addsy)
16893 && ! THUMB_IS_FUNC (fixP->fx_addsy))
16894 {
16895 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
16896 }
16897 }
16898 #endif
16899
16900 int
16901 arm_force_relocation (struct fix * fixp)
16902 {
16903 #if defined (OBJ_COFF) && defined (TE_PE)
16904 if (fixp->fx_r_type == BFD_RELOC_RVA)
16905 return 1;
16906 #endif
16907
16908 /* Resolve these relocations even if the symbol is extern or weak. */
16909 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
16910 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
16911 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
16912 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
16913 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
16914 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
16915 return 0;
16916
16917 return generic_force_reloc (fixp);
16918 }
16919
16920 #ifdef OBJ_COFF
16921 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
16922 local labels from being added to the output symbol table when they
16923 are used with the ADRL pseudo op. The ADRL relocation should always
16924 be resolved before the binbary is emitted, so it is safe to say that
16925 it is adjustable. */
16926
16927 bfd_boolean
16928 arm_fix_adjustable (fixS * fixP)
16929 {
16930 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
16931 return 1;
16932 return 0;
16933 }
16934 #endif
16935
16936 #ifdef OBJ_ELF
16937 /* Relocations against Thumb function names must be left unadjusted,
16938 so that the linker can use this information to correctly set the
16939 bottom bit of their addresses. The MIPS version of this function
16940 also prevents relocations that are mips-16 specific, but I do not
16941 know why it does this.
16942
16943 FIXME:
16944 There is one other problem that ought to be addressed here, but
16945 which currently is not: Taking the address of a label (rather
16946 than a function) and then later jumping to that address. Such
16947 addresses also ought to have their bottom bit set (assuming that
16948 they reside in Thumb code), but at the moment they will not. */
16949
16950 bfd_boolean
16951 arm_fix_adjustable (fixS * fixP)
16952 {
16953 if (fixP->fx_addsy == NULL)
16954 return 1;
16955
16956 if (THUMB_IS_FUNC (fixP->fx_addsy)
16957 && fixP->fx_subsy == NULL)
16958 return 0;
16959
16960 /* We need the symbol name for the VTABLE entries. */
16961 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
16962 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16963 return 0;
16964
16965 /* Don't allow symbols to be discarded on GOT related relocs. */
16966 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
16967 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
16968 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
16969 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
16970 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
16971 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
16972 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
16973 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
16974 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
16975 return 0;
16976
16977 return 1;
16978 }
16979
16980 const char *
16981 elf32_arm_target_format (void)
16982 {
16983 #ifdef TE_SYMBIAN
16984 return (target_big_endian
16985 ? "elf32-bigarm-symbian"
16986 : "elf32-littlearm-symbian");
16987 #elif defined (TE_VXWORKS)
16988 return (target_big_endian
16989 ? "elf32-bigarm-vxworks"
16990 : "elf32-littlearm-vxworks");
16991 #else
16992 if (target_big_endian)
16993 return "elf32-bigarm";
16994 else
16995 return "elf32-littlearm";
16996 #endif
16997 }
16998
16999 void
17000 armelf_frob_symbol (symbolS * symp,
17001 int * puntp)
17002 {
17003 elf_frob_symbol (symp, puntp);
17004 }
17005 #endif
17006
17007 /* MD interface: Finalization. */
17008
17009 /* A good place to do this, although this was probably not intended
17010 for this kind of use. We need to dump the literal pool before
17011 references are made to a null symbol pointer. */
17012
17013 void
17014 arm_cleanup (void)
17015 {
17016 literal_pool * pool;
17017
17018 for (pool = list_of_pools; pool; pool = pool->next)
17019 {
17020 /* Put it at the end of the relevent section. */
17021 subseg_set (pool->section, pool->sub_section);
17022 #ifdef OBJ_ELF
17023 arm_elf_change_section ();
17024 #endif
17025 s_ltorg (0);
17026 }
17027 }
17028
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is done via the storage class; for ELF via
   the symbol type bits (STT_ARM_TFUNC / STT_ARM_16BIT).  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		/* Any other storage class for a Thumb function is
		   unexpected; diagnose rather than guess.  */
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each plain storage class
	       to its Thumb-flavoured counterpart.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking symbols in the COFF native symbol record.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d) keep their generic type.  */
	  if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
17104
17105 /* MD interface: Initialization. */
17106
17107 static void
17108 set_constant_flonums (void)
17109 {
17110 int i;
17111
17112 for (i = 0; i < NUM_FLOAT_VALS; i++)
17113 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17114 abort ();
17115 }
17116
17117 void
17118 md_begin (void)
17119 {
17120 unsigned mach;
17121 unsigned int i;
17122
17123 if ( (arm_ops_hsh = hash_new ()) == NULL
17124 || (arm_cond_hsh = hash_new ()) == NULL
17125 || (arm_shift_hsh = hash_new ()) == NULL
17126 || (arm_psr_hsh = hash_new ()) == NULL
17127 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17128 || (arm_reg_hsh = hash_new ()) == NULL
17129 || (arm_reloc_hsh = hash_new ()) == NULL
17130 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17131 as_fatal (_("virtual memory exhausted"));
17132
17133 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17134 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17135 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17136 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17137 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17138 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17139 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17140 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17141 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17142 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17143 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17144 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17145 for (i = 0;
17146 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17147 i++)
17148 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17149 (PTR) (barrier_opt_names + i));
17150 #ifdef OBJ_ELF
17151 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17152 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17153 #endif
17154
17155 set_constant_flonums ();
17156
17157 /* Set the cpu variant based on the command-line options. We prefer
17158 -mcpu= over -march= if both are set (as for GCC); and we prefer
17159 -mfpu= over any other way of setting the floating point unit.
17160 Use of legacy options with new options are faulted. */
17161 if (legacy_cpu)
17162 {
17163 if (mcpu_cpu_opt || march_cpu_opt)
17164 as_bad (_("use of old and new-style options to set CPU type"));
17165
17166 mcpu_cpu_opt = legacy_cpu;
17167 }
17168 else if (!mcpu_cpu_opt)
17169 mcpu_cpu_opt = march_cpu_opt;
17170
17171 if (legacy_fpu)
17172 {
17173 if (mfpu_opt)
17174 as_bad (_("use of old and new-style options to set FPU type"));
17175
17176 mfpu_opt = legacy_fpu;
17177 }
17178 else if (!mfpu_opt)
17179 {
17180 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17181 /* Some environments specify a default FPU. If they don't, infer it
17182 from the processor. */
17183 if (mcpu_fpu_opt)
17184 mfpu_opt = mcpu_fpu_opt;
17185 else
17186 mfpu_opt = march_fpu_opt;
17187 #else
17188 mfpu_opt = &fpu_default;
17189 #endif
17190 }
17191
17192 if (!mfpu_opt)
17193 {
17194 if (!mcpu_cpu_opt)
17195 mfpu_opt = &fpu_default;
17196 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17197 mfpu_opt = &fpu_arch_vfp_v2;
17198 else
17199 mfpu_opt = &fpu_arch_fpa;
17200 }
17201
17202 #ifdef CPU_DEFAULT
17203 if (!mcpu_cpu_opt)
17204 {
17205 mcpu_cpu_opt = &cpu_default;
17206 selected_cpu = cpu_default;
17207 }
17208 #else
17209 if (mcpu_cpu_opt)
17210 selected_cpu = *mcpu_cpu_opt;
17211 else
17212 mcpu_cpu_opt = &arm_arch_any;
17213 #endif
17214
17215 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17216
17217 arm_arch_used = thumb_arch_used = arm_arch_none;
17218
17219 #if defined OBJ_COFF || defined OBJ_ELF
17220 {
17221 unsigned int flags = 0;
17222
17223 #if defined OBJ_ELF
17224 flags = meabi_flags;
17225
17226 switch (meabi_flags)
17227 {
17228 case EF_ARM_EABI_UNKNOWN:
17229 #endif
17230 /* Set the flags in the private structure. */
17231 if (uses_apcs_26) flags |= F_APCS26;
17232 if (support_interwork) flags |= F_INTERWORK;
17233 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17234 if (pic_code) flags |= F_PIC;
17235 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17236 flags |= F_SOFT_FLOAT;
17237
17238 switch (mfloat_abi_opt)
17239 {
17240 case ARM_FLOAT_ABI_SOFT:
17241 case ARM_FLOAT_ABI_SOFTFP:
17242 flags |= F_SOFT_FLOAT;
17243 break;
17244
17245 case ARM_FLOAT_ABI_HARD:
17246 if (flags & F_SOFT_FLOAT)
17247 as_bad (_("hard-float conflicts with specified fpu"));
17248 break;
17249 }
17250
17251 /* Using pure-endian doubles (even if soft-float). */
17252 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17253 flags |= F_VFP_FLOAT;
17254
17255 #if defined OBJ_ELF
17256 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17257 flags |= EF_ARM_MAVERICK_FLOAT;
17258 break;
17259
17260 case EF_ARM_EABI_VER4:
17261 case EF_ARM_EABI_VER5:
17262 /* No additional flags to set. */
17263 break;
17264
17265 default:
17266 abort ();
17267 }
17268 #endif
17269 bfd_set_private_flags (stdoutput, flags);
17270
17271 /* We have run out flags in the COFF header to encode the
17272 status of ATPCS support, so instead we create a dummy,
17273 empty, debug section called .arm.atpcs. */
17274 if (atpcs)
17275 {
17276 asection * sec;
17277
17278 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17279
17280 if (sec != NULL)
17281 {
17282 bfd_set_section_flags
17283 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17284 bfd_set_section_size (stdoutput, sec, 0);
17285 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17286 }
17287 }
17288 }
17289 #endif
17290
17291 /* Record the CPU type as well. */
17292 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17293 mach = bfd_mach_arm_iWMMXt;
17294 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17295 mach = bfd_mach_arm_XScale;
17296 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17297 mach = bfd_mach_arm_ep9312;
17298 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17299 mach = bfd_mach_arm_5TE;
17300 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17301 {
17302 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17303 mach = bfd_mach_arm_5T;
17304 else
17305 mach = bfd_mach_arm_5;
17306 }
17307 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17308 {
17309 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17310 mach = bfd_mach_arm_4T;
17311 else
17312 mach = bfd_mach_arm_4;
17313 }
17314 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17315 mach = bfd_mach_arm_3M;
17316 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17317 mach = bfd_mach_arm_3;
17318 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17319 mach = bfd_mach_arm_2a;
17320 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17321 mach = bfd_mach_arm_2;
17322 else
17323 mach = bfd_mach_arm_unknown;
17324
17325 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17326 }
17327
17328 /* Command line processing. */
17329
17330 /* md_parse_option
17331 Invocation line includes a switch not recognized by the base assembler.
17332 See if it's a processor-specific option.
17333
17334 This routine is somewhat complicated by the need for backwards
17335 compatibility (since older releases of gcc can't be changed).
17336 The new options try to make the interface as compatible as
17337 possible with GCC.
17338
17339 New options (supported) are:
17340
17341 -mcpu=<cpu name> Assemble for selected processor
17342 -march=<architecture name> Assemble for selected architecture
17343 -mfpu=<fpu architecture> Assemble for selected FPU.
17344 -EB/-mbig-endian Big-endian
17345 -EL/-mlittle-endian Little-endian
17346 -k Generate PIC code
17347 -mthumb Start in Thumb mode
17348 -mthumb-interwork Code supports ARM/Thumb interworking
17349
17350 For now we will also provide support for:
17351
17352 -mapcs-32 32-bit Program counter
17353 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
17355 -mapcs-reentrant Reentrant code
17356 -matpcs
17357 (sometime these will probably be replaced with -mapcs=<list of options>
17358 and -matpcs=<list of options>)
17359
   The remaining options are only supported for backwards compatibility.
17361 Cpu variants, the arm part is optional:
17362 -m[arm]1 Currently not supported.
17363 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17364 -m[arm]3 Arm 3 processor
17365 -m[arm]6[xx], Arm 6 processors
17366 -m[arm]7[xx][t][[d]m] Arm 7 processors
17367 -m[arm]8[10] Arm 8 processors
17368 -m[arm]9[20][tdmi] Arm 9 processors
17369 -mstrongarm[110[0]] StrongARM processors
17370 -mxscale XScale processors
17371 -m[arm]v[2345[t[e]]] Arm architectures
17372 -mall All (except the ARM1)
17373 FP variants:
17374 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17375 -mfpe-old (No float load/store multiples)
17376 -mvfpxd VFP Single precision
17377 -mvfp All VFP
17378 -mno-fpu Disable all floating point instructions
17379
17380 The following CPU names are recognized:
17381 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17382 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17383 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17384 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17385 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17386 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17387 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17388
17389 */
17390
/* Short options recognized by the generic getopt code: "-m<arg>" (the
   colon marks it as taking an argument) and the bare flag "-k".  */
const char * md_shortopts = "m:k";

/* Only define -EB/-EL for the endiannesses this build can actually
   produce; a bi-endian assembler gets both.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options passed to getopt_long by the generic option code; the
   OPTION_EB/OPTION_EL ids are handled in md_parse_option below.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
17416
/* A simple -m<flag> option: matching it stores VALUE into *VAR.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k", N_("generate PIC code"), &pic_code, 1, NULL},
  {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
17447
/* A deprecated -m<cpu>/-m<arch>/-m<fpu> legacy option: matching it points
   *VAR at VALUE.  The deprecation message names the modern replacement.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
17568
/* A -mcpu= table entry: the CPU's feature set plus the FPU assumed when
   the user does not give -mfpu=.  */
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
  {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
  {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
  {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
  {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
  {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
  {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
  {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
  {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
  {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
  {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
                                             | FPU_NEON_EXT_V1),
                                          NULL},
  {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
  {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
};
17676
/* A -march= table entry: the architecture's feature set plus the FPU
   assumed when the user does not give -mfpu=.  */
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA},
  {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
  {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
  {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
  {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
  {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
  {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
  {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
  {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
  {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
  {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
  {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
  {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
  {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
  {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
  {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
  {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
  {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
  {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
  {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
  {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
  {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
};
17722
/* ISA extensions in the co-processor space.  */
/* A simple name -> feature-set pair; shared by the "+extension" table
   and the -mfpu= table below.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

/* Extensions accepted after "+" in -mcpu=/-march= (see
   arm_parse_extension).  */
static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {NULL, ARM_ARCH_NONE}
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3},
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2},
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL, ARM_ARCH_NONE}
};
17766
/* A simple name -> integer value pair; used for -mfloat-abi= and
   (on ELF) -meabi=.  */
struct arm_option_value_table
{
  char *name;
  long value;
};

/* Arguments accepted by -mfloat-abi= (see arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard", ARM_FLOAT_ABI_HARD},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"soft", ARM_FLOAT_ABI_SOFT},
  {NULL, 0}
};
17780
17781 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Arguments accepted by -meabi= (see arm_parse_eabi).  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu", EF_ARM_EABI_UNKNOWN},
  {"4", EF_ARM_EABI_VER4},
  {"5", EF_ARM_EABI_VER5},
  {NULL, 0}
};
17790 #endif
17791
/* A long option of the form -m<prefix>=<subopt>; FUNC parses the
   sub-option text that follows the "=".  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
17799
17800 static int
17801 arm_parse_extension (char * str, const arm_feature_set **opt_p)
17802 {
17803 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
17804
17805 /* Copy the feature set, so that we can modify it. */
17806 *ext_set = **opt_p;
17807 *opt_p = ext_set;
17808
17809 while (str != NULL && *str != 0)
17810 {
17811 const struct arm_option_cpu_value_table * opt;
17812 char * ext;
17813 int optlen;
17814
17815 if (*str != '+')
17816 {
17817 as_bad (_("invalid architectural extension"));
17818 return 0;
17819 }
17820
17821 str++;
17822 ext = strchr (str, '+');
17823
17824 if (ext != NULL)
17825 optlen = ext - str;
17826 else
17827 optlen = strlen (str);
17828
17829 if (optlen == 0)
17830 {
17831 as_bad (_("missing architectural extension"));
17832 return 0;
17833 }
17834
17835 for (opt = arm_extensions; opt->name != NULL; opt++)
17836 if (strncmp (opt->name, str, optlen) == 0)
17837 {
17838 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
17839 break;
17840 }
17841
17842 if (opt->name == NULL)
17843 {
17844 as_bad (_("unknown architectural extnsion `%s'"), str);
17845 return 0;
17846 }
17847
17848 str = ext;
17849 };
17850
17851 return 1;
17852 }
17853
/* Parse the argument STR of -mcpu=, e.g. "arm920t" or "ep9312+maverick".
   On success sets mcpu_cpu_opt/mcpu_fpu_opt from the matching arm_cpus
   entry, fills in selected_cpu_name, handles any "+extension" suffix,
   and returns 1; returns 0 (after as_bad) otherwise.  */
static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');	/* Start of "+extension" suffix, if any.  */
  int optlen;				/* Length of the bare cpu name.  */

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  /* NOTE(review): this matches only the first OPTLEN characters, so the
     user's string may match a longer table name of which it is a prefix;
     table order decides which entry wins -- confirm this is intended.  */
  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	/* Record the CPU name, upper-cased unless a canonical spelling
	   is supplied by the table.  */
	if (opt->canonical_name)
	  strcpy(selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}
17896
17897 static int
17898 arm_parse_arch (char * str)
17899 {
17900 const struct arm_arch_option_table *opt;
17901 char *ext = strchr (str, '+');
17902 int optlen;
17903
17904 if (ext != NULL)
17905 optlen = ext - str;
17906 else
17907 optlen = strlen (str);
17908
17909 if (optlen == 0)
17910 {
17911 as_bad (_("missing architecture name `%s'"), str);
17912 return 0;
17913 }
17914
17915 for (opt = arm_archs; opt->name != NULL; opt++)
17916 if (streq (opt->name, str))
17917 {
17918 march_cpu_opt = &opt->value;
17919 march_fpu_opt = &opt->default_fpu;
17920 strcpy(selected_cpu_name, opt->name);
17921
17922 if (ext != NULL)
17923 return arm_parse_extension (ext, &march_cpu_opt);
17924
17925 return 1;
17926 }
17927
17928 as_bad (_("unknown architecture `%s'\n"), str);
17929 return 0;
17930 }
17931
17932 static int
17933 arm_parse_fpu (char * str)
17934 {
17935 const struct arm_option_cpu_value_table * opt;
17936
17937 for (opt = arm_fpus; opt->name != NULL; opt++)
17938 if (streq (opt->name, str))
17939 {
17940 mfpu_opt = &opt->value;
17941 return 1;
17942 }
17943
17944 as_bad (_("unknown floating point format `%s'\n"), str);
17945 return 0;
17946 }
17947
17948 static int
17949 arm_parse_float_abi (char * str)
17950 {
17951 const struct arm_option_value_table * opt;
17952
17953 for (opt = arm_float_abis; opt->name != NULL; opt++)
17954 if (streq (opt->name, str))
17955 {
17956 mfloat_abi_opt = opt->value;
17957 return 1;
17958 }
17959
17960 as_bad (_("unknown floating point abi `%s'\n"), str);
17961 return 0;
17962 }
17963
17964 #ifdef OBJ_ELF
17965 static int
17966 arm_parse_eabi (char * str)
17967 {
17968 const struct arm_option_value_table *opt;
17969
17970 for (opt = arm_eabis; opt->name != NULL; opt++)
17971 if (streq (opt->name, str))
17972 {
17973 meabi_flags = opt->value;
17974 return 1;
17975 }
17976 as_bad (_("unknown EABI `%s'\n"), str);
17977 return 0;
17978 }
17979 #endif
17980
/* The -m<name>=<subopt> options; matched by prefix in md_parse_option,
   which then hands the sub-option text to the entry's parser.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
17997
/* Handle a command-line option C (with argument ARG, possibly NULL) that
   the generic assembler did not recognize.  Tries, in order: the
   target-specific cases below, the simple flag table (arm_opts), the
   deprecated legacy table (arm_legacy_opts), and the sub-option parsers
   (arm_long_opts).  Returns non-zero iff the option was consumed.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option. Just ignore these, we don't support additional
         ones. */
      return 0;

    default:
      /* Simple flag options: match "-<char><rest>" exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Deprecated legacy CPU/FPU options.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* -m<name>=<subopt> options: match the prefix, then hand the
	 remainder (including the text after "=") to the parser.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
18090
18091 void
18092 md_show_usage (FILE * fp)
18093 {
18094 struct arm_option_table *opt;
18095 struct arm_long_option_table *lopt;
18096
18097 fprintf (fp, _(" ARM-specific assembler options:\n"));
18098
18099 for (opt = arm_opts; opt->option != NULL; opt++)
18100 if (opt->help != NULL)
18101 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18102
18103 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18104 if (lopt->help != NULL)
18105 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18106
18107 #ifdef OPTION_EB
18108 fprintf (fp, _("\
18109 -EB assemble code for a big-endian cpu\n"));
18110 #endif
18111
18112 #ifdef OPTION_EL
18113 fprintf (fp, _("\
18114 -EL assemble code for a little-endian cpu\n"));
18115 #endif
18116 }
18117
18118
18119 #ifdef OBJ_ELF
/* One entry associating an EABI Tag_CPU_arch attribute value with the
   architecture feature set it represents.  */
typedef struct
{
  int val;			/* Tag_CPU_arch value for this entry.  */
  arm_feature_set flags;	/* Features implied by that architecture.  */
} cpu_arch_ver_table;
18125
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  The scan in aeabi_set_public_attributes relies on
   this ordering: the last entry whose features are all present wins.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {8, ARM_ARCH_V6K},
    {9, ARM_ARCH_V6T2},
    /* All three v7 profiles share arch value 10; the profile itself is
       emitted separately as Tag_CPU_arch_profile.  */
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    /* Sentinel: a val of 0 terminates the scan.  */
    {0, ARM_ARCH_NONE}
};
18144
18145 /* Set the public EABI object attributes. */
18146 static void
18147 aeabi_set_public_attributes (void)
18148 {
18149 int arch;
18150 arm_feature_set flags;
18151 arm_feature_set tmp;
18152 const cpu_arch_ver_table *p;
18153
18154 /* Choose the architecture based on the capabilities of the requested cpu
18155 (if any) and/or the instructions actually used. */
18156 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18157 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18158 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18159
18160 tmp = flags;
18161 arch = 0;
18162 for (p = cpu_arch_ver; p->val; p++)
18163 {
18164 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18165 {
18166 arch = p->val;
18167 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18168 }
18169 }
18170
18171 /* Tag_CPU_name. */
18172 if (selected_cpu_name[0])
18173 {
18174 char *p;
18175
18176 p = selected_cpu_name;
18177 if (strncmp(p, "armv", 4) == 0)
18178 {
18179 int i;
18180
18181 p += 4;
18182 for (i = 0; p[i]; i++)
18183 p[i] = TOUPPER (p[i]);
18184 }
18185 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18186 }
18187 /* Tag_CPU_arch. */
18188 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18189 /* Tag_CPU_arch_profile. */
18190 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18191 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18192 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18193 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18194 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18195 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18196 /* Tag_ARM_ISA_use. */
18197 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18198 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18199 /* Tag_THUMB_ISA_use. */
18200 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18201 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18202 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18203 /* Tag_VFP_arch. */
18204 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18205 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18206 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18207 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18208 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18209 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18210 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18211 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18212 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18213 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18214 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18215 /* Tag_WMMX_arch. */
18216 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18217 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18218 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18219 /* Tag_NEON_arch. */
18220 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18221 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18222 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18223 }
18224
18225 /* Add the .ARM.attributes section. */
18226 void
18227 arm_md_end (void)
18228 {
18229 segT s;
18230 char *p;
18231 addressT addr;
18232 offsetT size;
18233
18234 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18235 return;
18236
18237 aeabi_set_public_attributes ();
18238 size = elf32_arm_eabi_attr_size (stdoutput);
18239 s = subseg_new (".ARM.attributes", 0);
18240 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18241 addr = frag_now_fix ();
18242 p = frag_more (size);
18243 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18244 }
18245 #endif /* OBJ_ELF */
18246
18247
18248 /* Parse a .cpu directive. */
18249
18250 static void
18251 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18252 {
18253 const struct arm_cpu_option_table *opt;
18254 char *name;
18255 char saved_char;
18256
18257 name = input_line_pointer;
18258 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18259 input_line_pointer++;
18260 saved_char = *input_line_pointer;
18261 *input_line_pointer = 0;
18262
18263 /* Skip the first "all" entry. */
18264 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18265 if (streq (opt->name, name))
18266 {
18267 mcpu_cpu_opt = &opt->value;
18268 selected_cpu = opt->value;
18269 if (opt->canonical_name)
18270 strcpy(selected_cpu_name, opt->canonical_name);
18271 else
18272 {
18273 int i;
18274 for (i = 0; opt->name[i]; i++)
18275 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18276 selected_cpu_name[i] = 0;
18277 }
18278 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18279 *input_line_pointer = saved_char;
18280 demand_empty_rest_of_line ();
18281 return;
18282 }
18283 as_bad (_("unknown cpu `%s'"), name);
18284 *input_line_pointer = saved_char;
18285 ignore_rest_of_line ();
18286 }
18287
18288
18289 /* Parse a .arch directive. */
18290
18291 static void
18292 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18293 {
18294 const struct arm_arch_option_table *opt;
18295 char saved_char;
18296 char *name;
18297
18298 name = input_line_pointer;
18299 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18300 input_line_pointer++;
18301 saved_char = *input_line_pointer;
18302 *input_line_pointer = 0;
18303
18304 /* Skip the first "all" entry. */
18305 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18306 if (streq (opt->name, name))
18307 {
18308 mcpu_cpu_opt = &opt->value;
18309 selected_cpu = opt->value;
18310 strcpy(selected_cpu_name, opt->name);
18311 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18312 *input_line_pointer = saved_char;
18313 demand_empty_rest_of_line ();
18314 return;
18315 }
18316
18317 as_bad (_("unknown architecture `%s'\n"), name);
18318 *input_line_pointer = saved_char;
18319 ignore_rest_of_line ();
18320 }
18321
18322
18323 /* Parse a .fpu directive. */
18324
18325 static void
18326 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18327 {
18328 const struct arm_option_cpu_value_table *opt;
18329 char saved_char;
18330 char *name;
18331
18332 name = input_line_pointer;
18333 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18334 input_line_pointer++;
18335 saved_char = *input_line_pointer;
18336 *input_line_pointer = 0;
18337
18338 for (opt = arm_fpus; opt->name != NULL; opt++)
18339 if (streq (opt->name, name))
18340 {
18341 mfpu_opt = &opt->value;
18342 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18343 *input_line_pointer = saved_char;
18344 demand_empty_rest_of_line ();
18345 return;
18346 }
18347
18348 as_bad (_("unknown floating point format `%s'\n"), name);
18349 *input_line_pointer = saved_char;
18350 ignore_rest_of_line ();
18351 }
18352