1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
51 static struct
52 {
53 symbolS * proc_start;
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
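/* This state is driven by the EHABI unwinding directives; a typical sequence
   in a function body looks like:
       .fnstart
       .save   {r4, r5, lr}
       .setfp  fp, sp, #8
       ...
       .fnend
   (illustrative directive sequence only).  */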
79
80 /* Bit N, if set, indicates that an R_ARM_NONE relocation has already been
81 output for __aeabi_unwind_cpp_prN. This enables dependencies to be
82 emitted only once per section, to save unnecessary bloat. */
83 static unsigned int marked_pr_dependency = 0;
84
85 #endif /* OBJ_ELF */
86
87 /* Results from operand parsing worker functions. */
88
89 typedef enum
90 {
91 PARSE_OPERAND_SUCCESS,
92 PARSE_OPERAND_FAIL,
93 PARSE_OPERAND_FAIL_NO_BACKTRACK
94 } parse_operand_result;
95
96 enum arm_float_abi
97 {
98 ARM_FLOAT_ABI_HARD,
99 ARM_FLOAT_ABI_SOFTFP,
100 ARM_FLOAT_ABI_SOFT
101 };
102
103 /* Types of processor to assemble for. */
104 #ifndef CPU_DEFAULT
105 #if defined __XSCALE__
106 #define CPU_DEFAULT ARM_ARCH_XSCALE
107 #else
108 #if defined __thumb__
109 #define CPU_DEFAULT ARM_ARCH_V5T
110 #endif
111 #endif
112 #endif
113
114 #ifndef FPU_DEFAULT
115 # ifdef TE_LINUX
116 # define FPU_DEFAULT FPU_ARCH_FPA
117 # elif defined (TE_NetBSD)
118 # ifdef OBJ_ELF
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
120 # else
121 /* Legacy a.out format. */
122 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
123 # endif
124 # elif defined (TE_VXWORKS)
125 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
126 # else
127 /* For backwards compatibility, default to FPA. */
128 # define FPU_DEFAULT FPU_ARCH_FPA
129 # endif
130 #endif /* ifndef FPU_DEFAULT */
131
132 #define streq(a, b) (strcmp (a, b) == 0)
133
134 static arm_feature_set cpu_variant;
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148
149 /* Variables that we set while parsing command-line options. Once all
150 options have been read we re-process these values to set the real
151 assembly flags. */
152 static const arm_feature_set *legacy_cpu = NULL;
153 static const arm_feature_set *legacy_fpu = NULL;
154
155 static const arm_feature_set *mcpu_cpu_opt = NULL;
156 static const arm_feature_set *mcpu_fpu_opt = NULL;
157 static const arm_feature_set *march_cpu_opt = NULL;
158 static const arm_feature_set *march_fpu_opt = NULL;
159 static const arm_feature_set *mfpu_opt = NULL;
160 static const arm_feature_set *object_arch = NULL;
161
162 /* Constants for known architecture features. */
163 static const arm_feature_set fpu_default = FPU_DEFAULT;
164 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
165 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
166 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
167 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
168 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
169 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
170 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
171 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
172
173 #ifdef CPU_DEFAULT
174 static const arm_feature_set cpu_default = CPU_DEFAULT;
175 #endif
176
177 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
178 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
179 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
180 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
181 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
182 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
183 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
184 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v4t_5 =
186 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
187 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
188 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
189 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
190 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
191 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
192 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
193 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
194 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
195 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
196 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
197 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
198 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
199 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
200 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
201 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
202 static const arm_feature_set arm_ext_m =
203 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
204
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
209
210 static const arm_feature_set arm_cext_iwmmxt2 =
211 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
212 static const arm_feature_set arm_cext_iwmmxt =
213 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
214 static const arm_feature_set arm_cext_xscale =
215 ARM_FEATURE (0, ARM_CEXT_XSCALE);
216 static const arm_feature_set arm_cext_maverick =
217 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
218 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
219 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v1xd =
221 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
222 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
223 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
224 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
225 static const arm_feature_set fpu_vfp_ext_d32 =
226 ARM_FEATURE (0, FPU_VFP_EXT_D32);
227 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
228 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
229 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
230 static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
231
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
237 #ifdef OBJ_ELF
238 # ifdef EABI_DEFAULT
239 static int meabi_flags = EABI_DEFAULT;
240 # else
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
242 # endif
243
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
245
246 bfd_boolean
247 arm_is_eabi (void)
248 {
249 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
250 }
251 #endif
252
253 #ifdef OBJ_ELF
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
256 #endif
257
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260 2: assemble for Thumb even though target CPU does not support thumb
261 instructions. */
262 static int thumb_mode = 0;
263 /* A value distinct from the possible values for thumb_mode that we
264 can use to record whether thumb_mode has been copied into the
265 tc_frag_data field of a frag. */
266 #define MODE_RECORDED (1 << 4)
267
268 /* If unified_syntax is true, we are processing the new unified
269 ARM/Thumb syntax. Important differences from the old ARM mode:
270
271 - Immediate operands do not require a # prefix.
272 - Conditional affixes always appear at the end of the
273 instruction. (For backward compatibility, those instructions
274 that formerly had them in the middle, continue to accept them
275 there.)
276 - The IT instruction may appear, and if it does is validated
277 against subsequent conditional affixes. It does not generate
278 machine code.
279
280 Important differences from the old Thumb mode:
281
282 - Immediate operands do not require a # prefix.
283 - Most of the V6T2 instructions are only available in unified mode.
284 - The .N and .W suffixes are recognized and honored (it is an error
285 if they cannot be honored).
286 - All instructions set the flags if and only if they have an 's' affix.
287 - Conditional affixes may be used. They are validated against
288 preceding IT instructions. Unlike ARM mode, you cannot use a
289 conditional affix except in the scope of an IT instruction. */
290
291 static bfd_boolean unified_syntax = FALSE;
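/* For example, the pre-unified "addeqs r0, r0, r1" is written
   "addseq r0, r0, r1" in unified syntax (condition suffix last), and an
   immediate such as "mov r0, 42" no longer needs the # prefix.  */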
292
293 enum neon_el_type
294 {
295 NT_invtype,
296 NT_untyped,
297 NT_integer,
298 NT_float,
299 NT_poly,
300 NT_signed,
301 NT_unsigned
302 };
303
304 struct neon_type_el
305 {
306 enum neon_el_type type;
307 unsigned size;
308 };
309
310 #define NEON_MAX_TYPE_ELS 4
311
312 struct neon_type
313 {
314 struct neon_type_el el[NEON_MAX_TYPE_ELS];
315 unsigned elems;
316 };
317
318 struct arm_it
319 {
320 const char * error;
321 unsigned long instruction;
322 int size;
323 int size_req;
324 int cond;
325 /* "uncond_value" is set to the value in place of the conditional field in
326 unconditional versions of the instruction, or -1 if nothing is
327 appropriate. */
328 int uncond_value;
329 struct neon_type vectype;
330 /* Set to the opcode if the instruction needs relaxation.
331 Zero if the instruction is not relaxed. */
332 unsigned long relax;
333 struct
334 {
335 bfd_reloc_code_real_type type;
336 expressionS exp;
337 int pc_rel;
338 } reloc;
339
340 struct
341 {
342 unsigned reg;
343 signed int imm;
344 struct neon_type_el vectype;
345 unsigned present : 1; /* Operand present. */
346 unsigned isreg : 1; /* Operand was a register. */
347 unsigned immisreg : 1; /* .imm field is a second register. */
348 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
349 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
350 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
351 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
352 instructions. This allows us to disambiguate ARM <-> vector insns. */
353 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
354 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
355 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
356 unsigned issingle : 1; /* Operand is VFP single-precision register. */
357 unsigned hasreloc : 1; /* Operand has relocation suffix. */
358 unsigned writeback : 1; /* Operand has trailing ! */
359 unsigned preind : 1; /* Preindexed address. */
360 unsigned postind : 1; /* Postindexed address. */
361 unsigned negative : 1; /* Index register was negated. */
362 unsigned shifted : 1; /* Shift applied to operation. */
363 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
364 } operands[6];
365 };
366
367 static struct arm_it inst;
368
369 #define NUM_FLOAT_VALS 8
370
371 const char * fp_const[] =
372 {
373 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
374 };
375
376 /* Number of littlenums required to hold an extended precision number. */
377 #define MAX_LITTLENUMS 6
378
379 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
380
381 #define FAIL (-1)
382 #define SUCCESS (0)
383
384 #define SUFF_S 1
385 #define SUFF_D 2
386 #define SUFF_E 3
387 #define SUFF_P 4
388
389 #define CP_T_X 0x00008000
390 #define CP_T_Y 0x00400000
391
392 #define CONDS_BIT 0x00100000
393 #define LOAD_BIT 0x00100000
394
395 #define DOUBLE_LOAD_FLAG 0x00000001
396
397 struct asm_cond
398 {
399 const char * template;
400 unsigned long value;
401 };
402
403 #define COND_ALWAYS 0xE
404
405 struct asm_psr
406 {
407 const char *template;
408 unsigned long field;
409 };
410
411 struct asm_barrier_opt
412 {
413 const char *template;
414 unsigned long value;
415 };
416
417 /* The bit that distinguishes CPSR and SPSR. */
418 #define SPSR_BIT (1 << 22)
419
420 /* The individual PSR flag bits. */
421 #define PSR_c (1 << 16)
422 #define PSR_x (1 << 17)
423 #define PSR_s (1 << 18)
424 #define PSR_f (1 << 19)
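/* For example, "msr CPSR_fc, r0" sets both PSR_f and PSR_c in the
   instruction's field mask (bits 19 and 16).  */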
425
426 struct reloc_entry
427 {
428 char *name;
429 bfd_reloc_code_real_type reloc;
430 };
431
432 enum vfp_reg_pos
433 {
434 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
435 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
436 };
437
438 enum vfp_ldstm_type
439 {
440 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
441 };
442
443 /* Bits for DEFINED field in neon_typed_alias. */
444 #define NTA_HASTYPE 1
445 #define NTA_HASINDEX 2
446
447 struct neon_typed_alias
448 {
449 unsigned char defined;
450 unsigned char index;
451 struct neon_type_el eltype;
452 };
453
454 /* ARM register categories. This includes coprocessor numbers and various
455 architecture extensions' registers. */
456 enum arm_reg_type
457 {
458 REG_TYPE_RN,
459 REG_TYPE_CP,
460 REG_TYPE_CN,
461 REG_TYPE_FN,
462 REG_TYPE_VFS,
463 REG_TYPE_VFD,
464 REG_TYPE_NQ,
465 REG_TYPE_VFSD,
466 REG_TYPE_NDQ,
467 REG_TYPE_NSDQ,
468 REG_TYPE_VFC,
469 REG_TYPE_MVF,
470 REG_TYPE_MVD,
471 REG_TYPE_MVFX,
472 REG_TYPE_MVDX,
473 REG_TYPE_MVAX,
474 REG_TYPE_DSPSC,
475 REG_TYPE_MMXWR,
476 REG_TYPE_MMXWC,
477 REG_TYPE_MMXWCG,
478 REG_TYPE_XSCALE,
479 };
480
481 /* Structure for a hash table entry for a register.
482 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
483 information which states whether a vector type or index is specified (for a
484 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
485 struct reg_entry
486 {
487 const char *name;
488 unsigned char number;
489 unsigned char type;
490 unsigned char builtin;
491 struct neon_typed_alias *neon;
492 };
493
494 /* Diagnostics used when we don't get a register of the expected type. */
495 const char *const reg_expected_msgs[] =
496 {
497 N_("ARM register expected"),
498 N_("bad or missing co-processor number"),
499 N_("co-processor register expected"),
500 N_("FPA register expected"),
501 N_("VFP single precision register expected"),
502 N_("VFP/Neon double precision register expected"),
503 N_("Neon quad precision register expected"),
504 N_("VFP single or double precision register expected"),
505 N_("Neon double or quad precision register expected"),
506 N_("VFP single, double or Neon quad precision register expected"),
507 N_("VFP system register expected"),
508 N_("Maverick MVF register expected"),
509 N_("Maverick MVD register expected"),
510 N_("Maverick MVFX register expected"),
511 N_("Maverick MVDX register expected"),
512 N_("Maverick MVAX register expected"),
513 N_("Maverick DSPSC register expected"),
514 N_("iWMMXt data register expected"),
515 N_("iWMMXt control register expected"),
516 N_("iWMMXt scalar register expected"),
517 N_("XScale accumulator register expected"),
518 };
519
520 /* Some well known registers that we refer to directly elsewhere. */
521 #define REG_SP 13
522 #define REG_LR 14
523 #define REG_PC 15
524
525 /* ARM instructions take 4 bytes in the object file, Thumb instructions
526 take 2: */
527 #define INSN_SIZE 4
528
529 struct asm_opcode
530 {
531 /* Basic string to match. */
532 const char *template;
533
534 /* Parameters to instruction. */
535 unsigned char operands[8];
536
537 /* Conditional tag - see opcode_lookup. */
538 unsigned int tag : 4;
539
540 /* Basic instruction code. */
541 unsigned int avalue : 28;
542
543 /* Thumb-format instruction code. */
544 unsigned int tvalue;
545
546 /* Which architecture variant provides this instruction. */
547 const arm_feature_set *avariant;
548 const arm_feature_set *tvariant;
549
550 /* Function to call to encode instruction in ARM format. */
551 void (* aencode) (void);
552
553 /* Function to call to encode instruction in Thumb format. */
554 void (* tencode) (void);
555 };
556
557 /* Defines for various bits that we will want to toggle. */
558 #define INST_IMMEDIATE 0x02000000
559 #define OFFSET_REG 0x02000000
560 #define HWOFFSET_IMM 0x00400000
561 #define SHIFT_BY_REG 0x00000010
562 #define PRE_INDEX 0x01000000
563 #define INDEX_UP 0x00800000
564 #define WRITE_BACK 0x00200000
565 #define LDM_TYPE_2_OR_3 0x00400000
566 #define CPSI_MMOD 0x00020000
567
568 #define LITERAL_MASK 0xf000f000
569 #define OPCODE_MASK 0xfe1fffff
570 #define V4_STR_BIT 0x00000020
571
572 #define T2_SUBS_PC_LR 0xf3de8f00
573
574 #define DATA_OP_SHIFT 21
575
576 #define T2_OPCODE_MASK 0xfe1fffff
577 #define T2_DATA_OP_SHIFT 21
578
579 /* Codes to distinguish the arithmetic instructions. */
580 #define OPCODE_AND 0
581 #define OPCODE_EOR 1
582 #define OPCODE_SUB 2
583 #define OPCODE_RSB 3
584 #define OPCODE_ADD 4
585 #define OPCODE_ADC 5
586 #define OPCODE_SBC 6
587 #define OPCODE_RSC 7
588 #define OPCODE_TST 8
589 #define OPCODE_TEQ 9
590 #define OPCODE_CMP 10
591 #define OPCODE_CMN 11
592 #define OPCODE_ORR 12
593 #define OPCODE_MOV 13
594 #define OPCODE_BIC 14
595 #define OPCODE_MVN 15
596
597 #define T2_OPCODE_AND 0
598 #define T2_OPCODE_BIC 1
599 #define T2_OPCODE_ORR 2
600 #define T2_OPCODE_ORN 3
601 #define T2_OPCODE_EOR 4
602 #define T2_OPCODE_ADD 8
603 #define T2_OPCODE_ADC 10
604 #define T2_OPCODE_SBC 11
605 #define T2_OPCODE_SUB 13
606 #define T2_OPCODE_RSB 14
607
608 #define T_OPCODE_MUL 0x4340
609 #define T_OPCODE_TST 0x4200
610 #define T_OPCODE_CMN 0x42c0
611 #define T_OPCODE_NEG 0x4240
612 #define T_OPCODE_MVN 0x43c0
613
614 #define T_OPCODE_ADD_R3 0x1800
615 #define T_OPCODE_SUB_R3 0x1a00
616 #define T_OPCODE_ADD_HI 0x4400
617 #define T_OPCODE_ADD_ST 0xb000
618 #define T_OPCODE_SUB_ST 0xb080
619 #define T_OPCODE_ADD_SP 0xa800
620 #define T_OPCODE_ADD_PC 0xa000
621 #define T_OPCODE_ADD_I8 0x3000
622 #define T_OPCODE_SUB_I8 0x3800
623 #define T_OPCODE_ADD_I3 0x1c00
624 #define T_OPCODE_SUB_I3 0x1e00
625
626 #define T_OPCODE_ASR_R 0x4100
627 #define T_OPCODE_LSL_R 0x4080
628 #define T_OPCODE_LSR_R 0x40c0
629 #define T_OPCODE_ROR_R 0x41c0
630 #define T_OPCODE_ASR_I 0x1000
631 #define T_OPCODE_LSL_I 0x0000
632 #define T_OPCODE_LSR_I 0x0800
633
634 #define T_OPCODE_MOV_I8 0x2000
635 #define T_OPCODE_CMP_I8 0x2800
636 #define T_OPCODE_CMP_LR 0x4280
637 #define T_OPCODE_MOV_HR 0x4600
638 #define T_OPCODE_CMP_HR 0x4500
639
640 #define T_OPCODE_LDR_PC 0x4800
641 #define T_OPCODE_LDR_SP 0x9800
642 #define T_OPCODE_STR_SP 0x9000
643 #define T_OPCODE_LDR_IW 0x6800
644 #define T_OPCODE_STR_IW 0x6000
645 #define T_OPCODE_LDR_IH 0x8800
646 #define T_OPCODE_STR_IH 0x8000
647 #define T_OPCODE_LDR_IB 0x7800
648 #define T_OPCODE_STR_IB 0x7000
649 #define T_OPCODE_LDR_RW 0x5800
650 #define T_OPCODE_STR_RW 0x5000
651 #define T_OPCODE_LDR_RH 0x5a00
652 #define T_OPCODE_STR_RH 0x5200
653 #define T_OPCODE_LDR_RB 0x5c00
654 #define T_OPCODE_STR_RB 0x5400
655
656 #define T_OPCODE_PUSH 0xb400
657 #define T_OPCODE_POP 0xbc00
658
659 #define T_OPCODE_BRANCH 0xe000
660
661 #define THUMB_SIZE 2 /* Size of thumb instruction. */
662 #define THUMB_PP_PC_LR 0x0100
663 #define THUMB_LOAD_BIT 0x0800
664 #define THUMB2_LOAD_BIT 0x00100000
665
666 #define BAD_ARGS _("bad arguments to instruction")
667 #define BAD_SP _("r13 not allowed here")
668 #define BAD_PC _("r15 not allowed here")
669 #define BAD_COND _("instruction cannot be conditional")
670 #define BAD_OVERLAP _("registers may not be the same")
671 #define BAD_HIREG _("lo register required")
672 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
673 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
674 #define BAD_BRANCH _("branch must be last instruction in IT block")
675 #define BAD_NOT_IT _("instruction not allowed in IT block")
676 #define BAD_FPU _("selected FPU does not support instruction")
677
678 static struct hash_control *arm_ops_hsh;
679 static struct hash_control *arm_cond_hsh;
680 static struct hash_control *arm_shift_hsh;
681 static struct hash_control *arm_psr_hsh;
682 static struct hash_control *arm_v7m_psr_hsh;
683 static struct hash_control *arm_reg_hsh;
684 static struct hash_control *arm_reloc_hsh;
685 static struct hash_control *arm_barrier_opt_hsh;
686
687 /* Stuff needed to resolve the label ambiguity
688 As:
689 ...
690 label: <insn>
691 may differ from:
692 ...
693 label:
694 <insn> */
695
696 symbolS * last_label_seen;
697 static int label_is_thumb_function_name = FALSE;
698 \f
699 /* Literal pool structure. Held on a per-section
700 and per-sub-section basis. */
701
702 #define MAX_LITERAL_POOL_SIZE 1024
703 typedef struct literal_pool
704 {
705 expressionS literals [MAX_LITERAL_POOL_SIZE];
706 unsigned int next_free_entry;
707 unsigned int id;
708 symbolS * symbol;
709 segT section;
710 subsegT sub_section;
711 struct literal_pool * next;
712 } literal_pool;
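/* Pool entries are typically created by the "ldr Rd, =<expression>" pseudo
   instruction and are emitted when a .ltorg directive (or the end of the
   section) flushes the pool.  */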
713
714 /* Pointer to a linked list of literal pools. */
715 literal_pool * list_of_pools = NULL;
716
717 /* State variables for IT block handling. */
718 static bfd_boolean current_it_mask = 0;
719 static int current_cc;
720 \f
721 /* Pure syntax. */
722
723 /* This array holds the chars that always start a comment. If the
724 pre-processor is disabled, these aren't very useful. */
725 const char comment_chars[] = "@";
726
727 /* This array holds the chars that only start a comment at the beginning of
728 a line. If the line seems to have the form '# 123 filename'
729 .line and .file directives will appear in the pre-processed output. */
730 /* Note that input_file.c hand checks for '#' at the beginning of the
731 first line of the input file. This is because the compiler outputs
732 #NO_APP at the beginning of its output. */
733 /* Also note that comments like this one will always work. */
734 const char line_comment_chars[] = "#";
735
736 const char line_separator_chars[] = ";";
737
738 /* Chars that can be used to separate mant
739 from exp in floating point numbers. */
740 const char EXP_CHARS[] = "eE";
741
742 /* Chars that mean this number is a floating point constant. */
743 /* As in 0f12.456 */
744 /* or 0d1.2345e12 */
745
746 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
747
748 /* Prefix characters that indicate the start of an immediate
749 value. */
750 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
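/* Either prefix character is accepted, e.g. "mov r0, #1" and "mov r0, $1"
   are equivalent.  */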
751
752 /* Separator character handling. */
753
754 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
755
756 static inline int
757 skip_past_char (char ** str, char c)
758 {
759 if (**str == c)
760 {
761 (*str)++;
762 return SUCCESS;
763 }
764 else
765 return FAIL;
766 }
767 #define skip_past_comma(str) skip_past_char (str, ',')
768
769 /* Arithmetic expressions (possibly involving symbols). */
770
771 /* Return TRUE if anything in the expression is a bignum. */
772
773 static int
774 walk_no_bignums (symbolS * sp)
775 {
776 if (symbol_get_value_expression (sp)->X_op == O_big)
777 return 1;
778
779 if (symbol_get_value_expression (sp)->X_add_symbol)
780 {
781 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
782 || (symbol_get_value_expression (sp)->X_op_symbol
783 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
784 }
785
786 return 0;
787 }
788
789 static int in_my_get_expression = 0;
790
791 /* Third argument to my_get_expression. */
792 #define GE_NO_PREFIX 0
793 #define GE_IMM_PREFIX 1
794 #define GE_OPT_PREFIX 2
795 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
796 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
797 #define GE_OPT_PREFIX_BIG 3
798
799 static int
800 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
801 {
802 char * save_in;
803 segT seg;
804
805 /* In unified syntax, all prefixes are optional. */
806 if (unified_syntax)
807 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
808 : GE_OPT_PREFIX;
809
810 switch (prefix_mode)
811 {
812 case GE_NO_PREFIX: break;
813 case GE_IMM_PREFIX:
814 if (!is_immediate_prefix (**str))
815 {
816 inst.error = _("immediate expression requires a # prefix");
817 return FAIL;
818 }
819 (*str)++;
820 break;
821 case GE_OPT_PREFIX:
822 case GE_OPT_PREFIX_BIG:
823 if (is_immediate_prefix (**str))
824 (*str)++;
825 break;
826 default: abort ();
827 }
828
829 memset (ep, 0, sizeof (expressionS));
830
831 save_in = input_line_pointer;
832 input_line_pointer = *str;
833 in_my_get_expression = 1;
834 seg = expression (ep);
835 in_my_get_expression = 0;
836
837 if (ep->X_op == O_illegal)
838 {
839 /* We found a bad expression in md_operand(). */
840 *str = input_line_pointer;
841 input_line_pointer = save_in;
842 if (inst.error == NULL)
843 inst.error = _("bad expression");
844 return 1;
845 }
846
847 #ifdef OBJ_AOUT
848 if (seg != absolute_section
849 && seg != text_section
850 && seg != data_section
851 && seg != bss_section
852 && seg != undefined_section)
853 {
854 inst.error = _("bad segment");
855 *str = input_line_pointer;
856 input_line_pointer = save_in;
857 return 1;
858 }
859 #endif
860
861 /* Get rid of any bignums now, so that we don't generate an error for which
862 we can't establish a line number later on. Big numbers are never valid
863 in instructions, which is where this routine is always called. */
864 if (prefix_mode != GE_OPT_PREFIX_BIG
865 && (ep->X_op == O_big
866 || (ep->X_add_symbol
867 && (walk_no_bignums (ep->X_add_symbol)
868 || (ep->X_op_symbol
869 && walk_no_bignums (ep->X_op_symbol))))))
870 {
871 inst.error = _("invalid constant");
872 *str = input_line_pointer;
873 input_line_pointer = save_in;
874 return 1;
875 }
876
877 *str = input_line_pointer;
878 input_line_pointer = save_in;
879 return 0;
880 }
881
882 /* Turn a string in input_line_pointer into a floating point constant
883 of type TYPE, and store the appropriate bytes in *LITP. The number
884 of LITTLENUMS emitted is stored in *SIZEP. An error message is
885 returned, or NULL on OK.
886
887 Note that fp constants aren't represented in the normal way on the ARM.
888 In big endian mode, things are as expected. However, in little endian
889 mode fp constants are big-endian word-wise, and little-endian byte-wise
890 within the words. For example, (double) 1.1 in big endian mode is
891 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
892 the byte sequence 99 99 f1 3f 9a 99 99 99.
893
894 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
895
896 char *
897 md_atof (int type, char * litP, int * sizeP)
898 {
899 int prec;
900 LITTLENUM_TYPE words[MAX_LITTLENUMS];
901 char *t;
902 int i;
903
904 switch (type)
905 {
906 case 'f':
907 case 'F':
908 case 's':
909 case 'S':
910 prec = 2;
911 break;
912
913 case 'd':
914 case 'D':
915 case 'r':
916 case 'R':
917 prec = 4;
918 break;
919
920 case 'x':
921 case 'X':
922 prec = 5;
923 break;
924
925 case 'p':
926 case 'P':
927 prec = 5;
928 break;
929
930 default:
931 *sizeP = 0;
932 return _("Unrecognized or unsupported floating point constant");
933 }
934
935 t = atof_ieee (input_line_pointer, type, words);
936 if (t)
937 input_line_pointer = t;
938 *sizeP = prec * sizeof (LITTLENUM_TYPE);
939
940 if (target_big_endian)
941 {
942 for (i = 0; i < prec; i++)
943 {
944 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
945 litP += sizeof (LITTLENUM_TYPE);
946 }
947 }
948 else
949 {
950 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
951 for (i = prec - 1; i >= 0; i--)
952 {
953 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
954 litP += sizeof (LITTLENUM_TYPE);
955 }
956 else
957 /* For a 4 byte float the order of elements in `words' is 1 0.
958 For an 8 byte float the order is 1 0 3 2. */
959 for (i = 0; i < prec; i += 2)
960 {
961 md_number_to_chars (litP, (valueT) words[i + 1],
962 sizeof (LITTLENUM_TYPE));
963 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
964 (valueT) words[i], sizeof (LITTLENUM_TYPE));
965 litP += 2 * sizeof (LITTLENUM_TYPE);
966 }
967 }
968
969 return NULL;
970 }
971
972 /* We handle all bad expressions here, so that we can report the faulty
973 instruction in the error message. */
974 void
975 md_operand (expressionS * expr)
976 {
977 if (in_my_get_expression)
978 expr->X_op = O_illegal;
979 }
980
981 /* Immediate values. */
982
983 /* Generic immediate-value read function for use in directives.
984 Accepts anything that 'expression' can fold to a constant.
985 *val receives the number. */
986 #ifdef OBJ_ELF
987 static int
988 immediate_for_directive (int *val)
989 {
990 expressionS exp;
991 exp.X_op = O_illegal;
992
993 if (is_immediate_prefix (*input_line_pointer))
994 {
995 input_line_pointer++;
996 expression (&exp);
997 }
998
999 if (exp.X_op != O_constant)
1000 {
1001 as_bad (_("expected #constant"));
1002 ignore_rest_of_line ();
1003 return FAIL;
1004 }
1005 *val = exp.X_add_number;
1006 return SUCCESS;
1007 }
1008 #endif
1009
1010 /* Register parsing. */
1011
1012 /* Generic register parser. CCP points to what should be the
1013 beginning of a register name. If it is indeed a valid register
1014 name, advance CCP over it and return the reg_entry structure;
1015 otherwise return NULL. Does not issue diagnostics. */
1016
1017 static struct reg_entry *
1018 arm_reg_parse_multi (char **ccp)
1019 {
1020 char *start = *ccp;
1021 char *p;
1022 struct reg_entry *reg;
1023
1024 #ifdef REGISTER_PREFIX
1025 if (*start != REGISTER_PREFIX)
1026 return NULL;
1027 start++;
1028 #endif
1029 #ifdef OPTIONAL_REGISTER_PREFIX
1030 if (*start == OPTIONAL_REGISTER_PREFIX)
1031 start++;
1032 #endif
1033
1034 p = start;
1035 if (!ISALPHA (*p) || !is_name_beginner (*p))
1036 return NULL;
1037
1038 do
1039 p++;
1040 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1041
1042 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1043
1044 if (!reg)
1045 return NULL;
1046
1047 *ccp = p;
1048 return reg;
1049 }
1050
1051 static int
1052 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1053 enum arm_reg_type type)
1054 {
1055 /* Alternative syntaxes are accepted for a few register classes. */
1056 switch (type)
1057 {
1058 case REG_TYPE_MVF:
1059 case REG_TYPE_MVD:
1060 case REG_TYPE_MVFX:
1061 case REG_TYPE_MVDX:
1062 /* Generic coprocessor register names are allowed for these. */
1063 if (reg && reg->type == REG_TYPE_CN)
1064 return reg->number;
1065 break;
1066
1067 case REG_TYPE_CP:
1068 /* For backward compatibility, a bare number is valid here. */
1069 {
1070 unsigned long processor = strtoul (start, ccp, 10);
1071 if (*ccp != start && processor <= 15)
1072 return processor;
1073 }
1074
1075 case REG_TYPE_MMXWC:
1076 /* WC includes WCG. ??? I'm not sure this is true for all
1077 instructions that take WC registers. */
1078 if (reg && reg->type == REG_TYPE_MMXWCG)
1079 return reg->number;
1080 break;
1081
1082 default:
1083 break;
1084 }
1085
1086 return FAIL;
1087 }
1088
1089 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1090 return value is the register number or FAIL. */
1091
1092 static int
1093 arm_reg_parse (char **ccp, enum arm_reg_type type)
1094 {
1095 char *start = *ccp;
1096 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1097 int ret;
1098
1099 /* Do not allow a scalar (reg+index) to parse as a register. */
1100 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1101 return FAIL;
1102
1103 if (reg && reg->type == type)
1104 return reg->number;
1105
1106 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1107 return ret;
1108
1109 *ccp = start;
1110 return FAIL;
1111 }
1112
1113 /* Parse a Neon type specifier. *STR should point at the leading '.'
1114 character. Does no verification at this stage that the type fits the opcode
1115 properly. E.g.,
1116
1117 .i32.i32.s16
1118 .s32.f32
1119 .u16
1120
1121 Can all be legally parsed by this function.
1122
1123 Fills in neon_type struct pointer with parsed information, and updates STR
1124 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1125 type, FAIL if not. */
1126
1127 static int
1128 parse_neon_type (struct neon_type *type, char **str)
1129 {
1130 char *ptr = *str;
1131
1132 if (type)
1133 type->elems = 0;
1134
1135 while (type->elems < NEON_MAX_TYPE_ELS)
1136 {
1137 enum neon_el_type thistype = NT_untyped;
1138 unsigned thissize = -1u;
1139
1140 if (*ptr != '.')
1141 break;
1142
1143 ptr++;
1144
1145 /* Just a size without an explicit type. */
1146 if (ISDIGIT (*ptr))
1147 goto parsesize;
1148
1149 switch (TOLOWER (*ptr))
1150 {
1151 case 'i': thistype = NT_integer; break;
1152 case 'f': thistype = NT_float; break;
1153 case 'p': thistype = NT_poly; break;
1154 case 's': thistype = NT_signed; break;
1155 case 'u': thistype = NT_unsigned; break;
1156 case 'd':
1157 thistype = NT_float;
1158 thissize = 64;
1159 ptr++;
1160 goto done;
1161 default:
1162 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1163 return FAIL;
1164 }
1165
1166 ptr++;
1167
1168 /* .f is an abbreviation for .f32. */
1169 if (thistype == NT_float && !ISDIGIT (*ptr))
1170 thissize = 32;
1171 else
1172 {
1173 parsesize:
1174 thissize = strtoul (ptr, &ptr, 10);
1175
1176 if (thissize != 8 && thissize != 16 && thissize != 32
1177 && thissize != 64)
1178 {
1179 as_bad (_("bad size %d in type specifier"), thissize);
1180 return FAIL;
1181 }
1182 }
1183
1184 done:
1185 if (type)
1186 {
1187 type->el[type->elems].type = thistype;
1188 type->el[type->elems].size = thissize;
1189 type->elems++;
1190 }
1191 }
1192
1193 /* Empty/missing type is not a successful parse. */
1194 if (type->elems == 0)
1195 return FAIL;
1196
1197 *str = ptr;
1198
1199 return SUCCESS;
1200 }
1201
1202 /* Errors may be set multiple times during parsing or bit encoding
1203 (particularly in the Neon bits), but usually the earliest error which is set
1204 will be the most meaningful. Avoid overwriting it with later (cascading)
1205 errors by calling this function. */
1206
1207 static void
1208 first_error (const char *err)
1209 {
1210 if (!inst.error)
1211 inst.error = err;
1212 }
1213
1214 /* Parse a single type, e.g. ".s32", leading period included. */
1215 static int
1216 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1217 {
1218 char *str = *ccp;
1219 struct neon_type optype;
1220
1221 if (*str == '.')
1222 {
1223 if (parse_neon_type (&optype, &str) == SUCCESS)
1224 {
1225 if (optype.elems == 1)
1226 *vectype = optype.el[0];
1227 else
1228 {
1229 first_error (_("only one type should be specified for operand"));
1230 return FAIL;
1231 }
1232 }
1233 else
1234 {
1235 first_error (_("vector type expected"));
1236 return FAIL;
1237 }
1238 }
1239 else
1240 return FAIL;
1241
1242 *ccp = str;
1243
1244 return SUCCESS;
1245 }
1246
1247 /* Special meanings for indices (which normally have a range of 0-7); these
1248 values still fit into a 4-bit integer. */
1249
1250 #define NEON_ALL_LANES 15
1251 #define NEON_INTERLEAVE_LANES 14
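/* For example, "d0[2]" parses to index 2, while the all-lanes form "d0[]"
   parses to NEON_ALL_LANES.  Plain registers in an interleaved structure
   list are given NEON_INTERLEAVE_LANES.  */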
1252
1253 /* Parse either a register or a scalar, with an optional type. Return the
1254 register number, and optionally fill in the actual type of the register
1255 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1256 type/index information in *TYPEINFO. */
1257
1258 static int
1259 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1260 enum arm_reg_type *rtype,
1261 struct neon_typed_alias *typeinfo)
1262 {
1263 char *str = *ccp;
1264 struct reg_entry *reg = arm_reg_parse_multi (&str);
1265 struct neon_typed_alias atype;
1266 struct neon_type_el parsetype;
1267
1268 atype.defined = 0;
1269 atype.index = -1;
1270 atype.eltype.type = NT_invtype;
1271 atype.eltype.size = -1;
1272
1273 /* Try alternate syntax for some types of register. Note these are mutually
1274 exclusive with the Neon syntax extensions. */
1275 if (reg == NULL)
1276 {
1277 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1278 if (altreg != FAIL)
1279 *ccp = str;
1280 if (typeinfo)
1281 *typeinfo = atype;
1282 return altreg;
1283 }
1284
1285 /* Undo polymorphism when a set of register types may be accepted. */
1286 if ((type == REG_TYPE_NDQ
1287 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1288 || (type == REG_TYPE_VFSD
1289 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1290 || (type == REG_TYPE_NSDQ
1291 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1292 || reg->type == REG_TYPE_NQ))
1293 || (type == REG_TYPE_MMXWC
1294 && (reg->type == REG_TYPE_MMXWCG)))
1295 type = reg->type;
1296
1297 if (type != reg->type)
1298 return FAIL;
1299
1300 if (reg->neon)
1301 atype = *reg->neon;
1302
1303 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1304 {
1305 if ((atype.defined & NTA_HASTYPE) != 0)
1306 {
1307 first_error (_("can't redefine type for operand"));
1308 return FAIL;
1309 }
1310 atype.defined |= NTA_HASTYPE;
1311 atype.eltype = parsetype;
1312 }
1313
1314 if (skip_past_char (&str, '[') == SUCCESS)
1315 {
1316 if (type != REG_TYPE_VFD)
1317 {
1318 first_error (_("only D registers may be indexed"));
1319 return FAIL;
1320 }
1321
1322 if ((atype.defined & NTA_HASINDEX) != 0)
1323 {
1324 first_error (_("can't change index for operand"));
1325 return FAIL;
1326 }
1327
1328 atype.defined |= NTA_HASINDEX;
1329
1330 if (skip_past_char (&str, ']') == SUCCESS)
1331 atype.index = NEON_ALL_LANES;
1332 else
1333 {
1334 expressionS exp;
1335
1336 my_get_expression (&exp, &str, GE_NO_PREFIX);
1337
1338 if (exp.X_op != O_constant)
1339 {
1340 first_error (_("constant expression required"));
1341 return FAIL;
1342 }
1343
1344 if (skip_past_char (&str, ']') == FAIL)
1345 return FAIL;
1346
1347 atype.index = exp.X_add_number;
1348 }
1349 }
1350
1351 if (typeinfo)
1352 *typeinfo = atype;
1353
1354 if (rtype)
1355 *rtype = type;
1356
1357 *ccp = str;
1358
1359 return reg->number;
1360 }
1361
1362 /* Like arm_reg_parse, but also allow the following extra features:
1363 - If RTYPE is non-zero, return the (possibly restricted) type of the
1364 register (e.g. Neon double or quad reg when either has been requested).
1365 - If this is a Neon vector type with additional type information, fill
1366 in the struct pointed to by VECTYPE (if non-NULL).
1367 This function will fault on encountering a scalar. */
1368
1369 static int
1370 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1371 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1372 {
1373 struct neon_typed_alias atype;
1374 char *str = *ccp;
1375 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1376
1377 if (reg == FAIL)
1378 return FAIL;
1379
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if ((atype.defined & NTA_HASINDEX) != 0)
1382 {
1383 first_error (_("register operand expected, but got scalar"));
1384 return FAIL;
1385 }
1386
1387 if (vectype)
1388 *vectype = atype.eltype;
1389
1390 *ccp = str;
1391
1392 return reg;
1393 }
1394
1395 #define NEON_SCALAR_REG(X) ((X) >> 4)
1396 #define NEON_SCALAR_INDEX(X) ((X) & 15)
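/* parse_scalar below returns a scalar as reg * 16 + index, so for example
   d5[2] yields 82, from which NEON_SCALAR_REG gives 5 and NEON_SCALAR_INDEX
   gives 2.  */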
1397
1398 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1399 have enough information to be able to do a good job bounds-checking. So, we
1400 just do easy checks here, and do further checks later. */
1401
1402 static int
1403 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1404 {
1405 int reg;
1406 char *str = *ccp;
1407 struct neon_typed_alias atype;
1408
1409 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1410
1411 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1412 return FAIL;
1413
1414 if (atype.index == NEON_ALL_LANES)
1415 {
1416 first_error (_("scalar must have an index"));
1417 return FAIL;
1418 }
1419 else if (atype.index >= 64 / elsize)
1420 {
1421 first_error (_("scalar index out of range"));
1422 return FAIL;
1423 }
1424
1425 if (type)
1426 *type = atype.eltype;
1427
1428 *ccp = str;
1429
1430 return reg * 16 + atype.index;
1431 }
1432
1433 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
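/* For example, "{r0-r3, lr}" yields the bitmask 0x400f (bits 0-3 and 14).  */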
1434 static long
1435 parse_reg_list (char ** strp)
1436 {
1437 char * str = * strp;
1438 long range = 0;
1439 int another_range;
1440
1441 /* We come back here if we get ranges concatenated by '+' or '|'. */
1442 do
1443 {
1444 another_range = 0;
1445
1446 if (*str == '{')
1447 {
1448 int in_range = 0;
1449 int cur_reg = -1;
1450
1451 str++;
1452 do
1453 {
1454 int reg;
1455
1456 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1457 {
1458 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1459 return FAIL;
1460 }
1461
1462 if (in_range)
1463 {
1464 int i;
1465
1466 if (reg <= cur_reg)
1467 {
1468 first_error (_("bad range in register list"));
1469 return FAIL;
1470 }
1471
1472 for (i = cur_reg + 1; i < reg; i++)
1473 {
1474 if (range & (1 << i))
1475 as_tsktsk
1476 (_("Warning: duplicated register (r%d) in register list"),
1477 i);
1478 else
1479 range |= 1 << i;
1480 }
1481 in_range = 0;
1482 }
1483
1484 if (range & (1 << reg))
1485 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1486 reg);
1487 else if (reg <= cur_reg)
1488 as_tsktsk (_("Warning: register range not in ascending order"));
1489
1490 range |= 1 << reg;
1491 cur_reg = reg;
1492 }
1493 while (skip_past_comma (&str) != FAIL
1494 || (in_range = 1, *str++ == '-'));
1495 str--;
1496
1497 if (*str++ != '}')
1498 {
1499 first_error (_("missing `}'"));
1500 return FAIL;
1501 }
1502 }
1503 else
1504 {
1505 expressionS expr;
1506
1507 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1508 return FAIL;
1509
1510 if (expr.X_op == O_constant)
1511 {
1512 if (expr.X_add_number
1513 != (expr.X_add_number & 0x0000ffff))
1514 {
1515 inst.error = _("invalid register mask");
1516 return FAIL;
1517 }
1518
1519 if ((range & expr.X_add_number) != 0)
1520 {
1521 int regno = range & expr.X_add_number;
1522
1523 regno &= -regno;
1524 regno = (1 << regno) - 1;
1525 as_tsktsk
1526 (_("Warning: duplicated register (r%d) in register list"),
1527 regno);
1528 }
1529
1530 range |= expr.X_add_number;
1531 }
1532 else
1533 {
1534 if (inst.reloc.type != 0)
1535 {
1536 inst.error = _("expression too complex");
1537 return FAIL;
1538 }
1539
1540 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1541 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1542 inst.reloc.pc_rel = 0;
1543 }
1544 }
1545
1546 if (*str == '|' || *str == '+')
1547 {
1548 str++;
1549 another_range = 1;
1550 }
1551 }
1552 while (another_range);
1553
1554 *strp = str;
1555 return range;
1556 }
1557
1558 /* Types of registers in a list. */
1559
1560 enum reg_list_els
1561 {
1562 REGLIST_VFP_S,
1563 REGLIST_VFP_D,
1564 REGLIST_NEON_D
1565 };
1566
1567 /* Parse a VFP register list. If the string is invalid return FAIL.
1568 Otherwise return the number of registers, and set PBASE to the first
1569 register. Parses registers of type ETYPE.
1570 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1571 - Q registers can be used to specify pairs of D registers
1572 - { } can be omitted from around a singleton register list
1573 FIXME: This is not implemented, as it would require backtracking in
1574 some cases, e.g.:
1575 vtbl.8 d3,d4,d5
1576 This could be done (the meaning isn't really ambiguous), but doesn't
1577 fit in well with the current parsing framework.
1578 - 32 D registers may be used (also true for VFPv3).
1579 FIXME: Types are ignored in these register lists, which is probably a
1580 bug. */
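/* For example, "{s4-s7}" parses as four registers with *PBASE set to 4, and
   (with REGLIST_NEON_D) "{q0-q1}" parses as the four D registers d0-d3 with
   *PBASE set to 0.  */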
1581
1582 static int
1583 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1584 {
1585 char *str = *ccp;
1586 int base_reg;
1587 int new_base;
1588 enum arm_reg_type regtype = 0;
1589 int max_regs = 0;
1590 int count = 0;
1591 int warned = 0;
1592 unsigned long mask = 0;
1593 int i;
1594
1595 if (*str != '{')
1596 {
1597 inst.error = _("expecting {");
1598 return FAIL;
1599 }
1600
1601 str++;
1602
1603 switch (etype)
1604 {
1605 case REGLIST_VFP_S:
1606 regtype = REG_TYPE_VFS;
1607 max_regs = 32;
1608 break;
1609
1610 case REGLIST_VFP_D:
1611 regtype = REG_TYPE_VFD;
1612 break;
1613
1614 case REGLIST_NEON_D:
1615 regtype = REG_TYPE_NDQ;
1616 break;
1617 }
1618
1619 if (etype != REGLIST_VFP_S)
1620 {
1621 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1622 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1623 {
1624 max_regs = 32;
1625 if (thumb_mode)
1626 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1627 fpu_vfp_ext_d32);
1628 else
1629 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1630 fpu_vfp_ext_d32);
1631 }
1632 else
1633 max_regs = 16;
1634 }
1635
1636 base_reg = max_regs;
1637
1638 do
1639 {
1640 int setmask = 1, addregs = 1;
1641
1642 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1643
1644 if (new_base == FAIL)
1645 {
1646 first_error (_(reg_expected_msgs[regtype]));
1647 return FAIL;
1648 }
1649
1650 if (new_base >= max_regs)
1651 {
1652 first_error (_("register out of range in list"));
1653 return FAIL;
1654 }
1655
1656 /* Note: a value of 2 * n is returned for the register Q<n>. */
1657 if (regtype == REG_TYPE_NQ)
1658 {
1659 setmask = 3;
1660 addregs = 2;
1661 }
1662
1663 if (new_base < base_reg)
1664 base_reg = new_base;
1665
1666 if (mask & (setmask << new_base))
1667 {
1668 first_error (_("invalid register list"));
1669 return FAIL;
1670 }
1671
1672 if ((mask >> new_base) != 0 && ! warned)
1673 {
1674 as_tsktsk (_("register list not in ascending order"));
1675 warned = 1;
1676 }
1677
1678 mask |= setmask << new_base;
1679 count += addregs;
1680
1681 if (*str == '-') /* We have the start of a range expression */
1682 {
1683 int high_range;
1684
1685 str++;
1686
1687 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1688 == FAIL)
1689 {
1690 inst.error = gettext (reg_expected_msgs[regtype]);
1691 return FAIL;
1692 }
1693
1694 if (high_range >= max_regs)
1695 {
1696 first_error (_("register out of range in list"));
1697 return FAIL;
1698 }
1699
1700 if (regtype == REG_TYPE_NQ)
1701 high_range = high_range + 1;
1702
1703 if (high_range <= new_base)
1704 {
1705 inst.error = _("register range not in ascending order");
1706 return FAIL;
1707 }
1708
1709 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1710 {
1711 if (mask & (setmask << new_base))
1712 {
1713 inst.error = _("invalid register list");
1714 return FAIL;
1715 }
1716
1717 mask |= setmask << new_base;
1718 count += addregs;
1719 }
1720 }
1721 }
1722 while (skip_past_comma (&str) != FAIL);
1723
1724 str++;
1725
1726 /* Sanity check -- should have raised a parse error above. */
1727 if (count == 0 || count > max_regs)
1728 abort ();
1729
1730 *pbase = base_reg;
1731
1732 /* Final test -- the registers must be consecutive. */
1733 mask >>= base_reg;
1734 for (i = 0; i < count; i++)
1735 {
1736 if ((mask & (1u << i)) == 0)
1737 {
1738 inst.error = _("non-contiguous register range");
1739 return FAIL;
1740 }
1741 }
1742
1743 *ccp = str;
1744
1745 return count;
1746 }
1747
1748 /* True if two alias types are the same. */
1749
1750 static int
1751 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1752 {
1753 if (!a && !b)
1754 return 1;
1755
1756 if (!a || !b)
1757 return 0;
1758
1759 if (a->defined != b->defined)
1760 return 0;
1761
1762 if ((a->defined & NTA_HASTYPE) != 0
1763 && (a->eltype.type != b->eltype.type
1764 || a->eltype.size != b->eltype.size))
1765 return 0;
1766
1767 if ((a->defined & NTA_HASINDEX) != 0
1768 && (a->index != b->index))
1769 return 0;
1770
1771 return 1;
1772 }
1773
1774 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1775 The base register is put in *PBASE.
1776 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1777 the return value.
1778 The register stride (minus one) is put in bit 4 of the return value.
1779 Bits [6:5] encode the list length (minus one).
1780 The type of the list elements is put in *ELTYPE, if non-NULL. */
1781
1782 #define NEON_LANE(X) ((X) & 0xf)
1783 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1784 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
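/* For example, "{d0,d2}" (an interleaved two-register list with stride 2)
   is returned as NEON_INTERLEAVE_LANES | (1 << 4) | (1 << 5), i.e. 0x3e,
   while "{d0[1]}" gives lane 1, stride 1, length 1, i.e. 0x01.  */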
1785
1786 static int
1787 parse_neon_el_struct_list (char **str, unsigned *pbase,
1788 struct neon_type_el *eltype)
1789 {
1790 char *ptr = *str;
1791 int base_reg = -1;
1792 int reg_incr = -1;
1793 int count = 0;
1794 int lane = -1;
1795 int leading_brace = 0;
1796 enum arm_reg_type rtype = REG_TYPE_NDQ;
1797 int addregs = 1;
1798 const char *const incr_error = "register stride must be 1 or 2";
1799 const char *const type_error = "mismatched element/structure types in list";
1800 struct neon_typed_alias firsttype;
1801
1802 if (skip_past_char (&ptr, '{') == SUCCESS)
1803 leading_brace = 1;
1804
1805 do
1806 {
1807 struct neon_typed_alias atype;
1808 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1809
1810 if (getreg == FAIL)
1811 {
1812 first_error (_(reg_expected_msgs[rtype]));
1813 return FAIL;
1814 }
1815
1816 if (base_reg == -1)
1817 {
1818 base_reg = getreg;
1819 if (rtype == REG_TYPE_NQ)
1820 {
1821 reg_incr = 1;
1822 addregs = 2;
1823 }
1824 firsttype = atype;
1825 }
1826 else if (reg_incr == -1)
1827 {
1828 reg_incr = getreg - base_reg;
1829 if (reg_incr < 1 || reg_incr > 2)
1830 {
1831 first_error (_(incr_error));
1832 return FAIL;
1833 }
1834 }
1835 else if (getreg != base_reg + reg_incr * count)
1836 {
1837 first_error (_(incr_error));
1838 return FAIL;
1839 }
1840
1841 if (!neon_alias_types_same (&atype, &firsttype))
1842 {
1843 first_error (_(type_error));
1844 return FAIL;
1845 }
1846
1847 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1848 modes. */
1849 if (ptr[0] == '-')
1850 {
1851 struct neon_typed_alias htype;
1852 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1853 if (lane == -1)
1854 lane = NEON_INTERLEAVE_LANES;
1855 else if (lane != NEON_INTERLEAVE_LANES)
1856 {
1857 first_error (_(type_error));
1858 return FAIL;
1859 }
1860 if (reg_incr == -1)
1861 reg_incr = 1;
1862 else if (reg_incr != 1)
1863 {
1864 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1865 return FAIL;
1866 }
1867 ptr++;
1868 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1869 if (hireg == FAIL)
1870 {
1871 first_error (_(reg_expected_msgs[rtype]));
1872 return FAIL;
1873 }
1874 if (!neon_alias_types_same (&htype, &firsttype))
1875 {
1876 first_error (_(type_error));
1877 return FAIL;
1878 }
1879 count += hireg + dregs - getreg;
1880 continue;
1881 }
1882
1883 /* If we're using Q registers, we can't use [] or [n] syntax. */
1884 if (rtype == REG_TYPE_NQ)
1885 {
1886 count += 2;
1887 continue;
1888 }
1889
1890 if ((atype.defined & NTA_HASINDEX) != 0)
1891 {
1892 if (lane == -1)
1893 lane = atype.index;
1894 else if (lane != atype.index)
1895 {
1896 first_error (_(type_error));
1897 return FAIL;
1898 }
1899 }
1900 else if (lane == -1)
1901 lane = NEON_INTERLEAVE_LANES;
1902 else if (lane != NEON_INTERLEAVE_LANES)
1903 {
1904 first_error (_(type_error));
1905 return FAIL;
1906 }
1907 count++;
1908 }
1909 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1910
1911 /* No lane set by [x]. We must be interleaving structures. */
1912 if (lane == -1)
1913 lane = NEON_INTERLEAVE_LANES;
1914
1915 /* Sanity check. */
1916 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1917 || (count > 1 && reg_incr == -1))
1918 {
1919 first_error (_("error parsing element/structure list"));
1920 return FAIL;
1921 }
1922
1923 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1924 {
1925 first_error (_("expected }"));
1926 return FAIL;
1927 }
1928
1929 if (reg_incr == -1)
1930 reg_incr = 1;
1931
1932 if (eltype)
1933 *eltype = firsttype.eltype;
1934
1935 *pbase = base_reg;
1936 *str = ptr;
1937
1938 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1939 }
1940
1941 /* Parse an explicit relocation suffix on an expression. This is
1942 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1943 arm_reloc_hsh contains no entries, so this function can only
1944 succeed if there is no () after the word. Returns -1 on error,
1945 BFD_RELOC_UNUSED if there wasn't any suffix. */
1946 static int
1947 parse_reloc (char **str)
1948 {
1949 struct reloc_entry *r;
1950 char *p, *q;
1951
1952 if (**str != '(')
1953 return BFD_RELOC_UNUSED;
1954
1955 p = *str + 1;
1956 q = p;
1957
1958 while (*q && *q != ')' && *q != ',')
1959 q++;
1960 if (*q != ')')
1961 return -1;
1962
1963 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1964 return -1;
1965
1966 *str = q + 1;
1967 return r->reloc;
1968 }
1969
1970 /* Directives: register aliases. */
1971
1972 static struct reg_entry *
1973 insert_reg_alias (char *str, int number, int type)
1974 {
1975 struct reg_entry *new;
1976 const char *name;
1977
1978 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1979 {
1980 if (new->builtin)
1981 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1982
1983 /* Only warn about a redefinition if it's not defined as the
1984 same register. */
1985 else if (new->number != number || new->type != type)
1986 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1987
1988 return NULL;
1989 }
1990
1991 name = xstrdup (str);
1992 new = xmalloc (sizeof (struct reg_entry));
1993
1994 new->name = name;
1995 new->number = number;
1996 new->type = type;
1997 new->builtin = FALSE;
1998 new->neon = NULL;
1999
2000 if (hash_insert (arm_reg_hsh, name, (void *) new))
2001 abort ();
2002
2003 return new;
2004 }
2005
2006 static void
2007 insert_neon_reg_alias (char *str, int number, int type,
2008 struct neon_typed_alias *atype)
2009 {
2010 struct reg_entry *reg = insert_reg_alias (str, number, type);
2011
2012 if (!reg)
2013 {
2014 first_error (_("attempt to redefine typed alias"));
2015 return;
2016 }
2017
2018 if (atype)
2019 {
2020 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2021 *reg->neon = *atype;
2022 }
2023 }
2024
2025 /* Look for the .req directive. This is of the form:
2026
2027 new_register_name .req existing_register_name
2028
2029 If we find one, or if it looks sufficiently like one that we want to
2030 handle any error here, return TRUE. Otherwise return FALSE. */
2031
2032 static bfd_boolean
2033 create_register_alias (char * newname, char *p)
2034 {
2035 struct reg_entry *old;
2036 char *oldname, *nbuf;
2037 size_t nlen;
2038
2039 /* The input scrubber ensures that whitespace after the mnemonic is
2040 collapsed to single spaces. */
2041 oldname = p;
2042 if (strncmp (oldname, " .req ", 6) != 0)
2043 return FALSE;
2044
2045 oldname += 6;
2046 if (*oldname == '\0')
2047 return FALSE;
2048
2049 old = hash_find (arm_reg_hsh, oldname);
2050 if (!old)
2051 {
2052 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2053 return TRUE;
2054 }
2055
2056 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2057 the desired alias name, and p points to its end. If not, then
2058 the desired alias name is in the global original_case_string. */
2059 #ifdef TC_CASE_SENSITIVE
2060 nlen = p - newname;
2061 #else
2062 newname = original_case_string;
2063 nlen = strlen (newname);
2064 #endif
2065
2066 nbuf = alloca (nlen + 1);
2067 memcpy (nbuf, newname, nlen);
2068 nbuf[nlen] = '\0';
2069
2070 /* Create aliases under the new name as stated; an all-lowercase
2071 version of the new name; and an all-uppercase version of the new
2072 name. */
2073 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2074 {
2075 for (p = nbuf; *p; p++)
2076 *p = TOUPPER (*p);
2077
2078 if (strncmp (nbuf, newname, nlen))
2079 {
2080 /* If this attempt to create an additional alias fails, do not bother
2081 trying to create the all-lower case alias. We will fail and issue
2082 a second, duplicate error message. This situation arises when the
2083 programmer does something like:
2084 foo .req r0
2085 Foo .req r1
2086 The second .req creates the "Foo" alias but then fails to create
2087 the artificial FOO alias because it has already been created by the
2088 first .req. */
2089 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2090 return TRUE;
2091 }
2092
2093 for (p = nbuf; *p; p++)
2094 *p = TOLOWER (*p);
2095
2096 if (strncmp (nbuf, newname, nlen))
2097 insert_reg_alias (nbuf, old->number, old->type);
2098 }
2099
2100 return TRUE;
2101 }
2102
2103 /* Create a Neon typed/indexed register alias using directives, e.g.:
2104 X .dn d5.s32[1]
2105 Y .qn 6.s16
2106 Z .dn d7
2107 T .dn Z[0]
2108 These typed registers can be used instead of the types specified after the
2109 Neon mnemonic, so long as all operands given have types. Types can also be
2110 specified directly, e.g.:
2111 vadd d0.s32, d1.s32, d2.s32 */
2112
2113 static int
2114 create_neon_reg_alias (char *newname, char *p)
2115 {
2116 enum arm_reg_type basetype;
2117 struct reg_entry *basereg;
2118 struct reg_entry mybasereg;
2119 struct neon_type ntype;
2120 struct neon_typed_alias typeinfo;
2121 char *namebuf, *nameend;
2122 int namelen;
2123
2124 typeinfo.defined = 0;
2125 typeinfo.eltype.type = NT_invtype;
2126 typeinfo.eltype.size = -1;
2127 typeinfo.index = -1;
2128
2129 nameend = p;
2130
2131 if (strncmp (p, " .dn ", 5) == 0)
2132 basetype = REG_TYPE_VFD;
2133 else if (strncmp (p, " .qn ", 5) == 0)
2134 basetype = REG_TYPE_NQ;
2135 else
2136 return 0;
2137
2138 p += 5;
2139
2140 if (*p == '\0')
2141 return 0;
2142
2143 basereg = arm_reg_parse_multi (&p);
2144
2145 if (basereg && basereg->type != basetype)
2146 {
2147 as_bad (_("bad type for register"));
2148 return 0;
2149 }
2150
2151 if (basereg == NULL)
2152 {
2153 expressionS exp;
2154 /* Try parsing as an integer. */
2155 my_get_expression (&exp, &p, GE_NO_PREFIX);
2156 if (exp.X_op != O_constant)
2157 {
2158 as_bad (_("expression must be constant"));
2159 return 0;
2160 }
2161 basereg = &mybasereg;
2162 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2163 : exp.X_add_number;
2164 basereg->neon = 0;
2165 }
2166
2167 if (basereg->neon)
2168 typeinfo = *basereg->neon;
2169
2170 if (parse_neon_type (&ntype, &p) == SUCCESS)
2171 {
2172 /* We got a type. */
2173 if (typeinfo.defined & NTA_HASTYPE)
2174 {
2175 as_bad (_("can't redefine the type of a register alias"));
2176 return 0;
2177 }
2178
2179 typeinfo.defined |= NTA_HASTYPE;
2180 if (ntype.elems != 1)
2181 {
2182 as_bad (_("you must specify a single type only"));
2183 return 0;
2184 }
2185 typeinfo.eltype = ntype.el[0];
2186 }
2187
2188 if (skip_past_char (&p, '[') == SUCCESS)
2189 {
2190 expressionS exp;
2191 /* We got a scalar index. */
2192
2193 if (typeinfo.defined & NTA_HASINDEX)
2194 {
2195 as_bad (_("can't redefine the index of a scalar alias"));
2196 return 0;
2197 }
2198
2199 my_get_expression (&exp, &p, GE_NO_PREFIX);
2200
2201 if (exp.X_op != O_constant)
2202 {
2203 as_bad (_("scalar index must be constant"));
2204 return 0;
2205 }
2206
2207 typeinfo.defined |= NTA_HASINDEX;
2208 typeinfo.index = exp.X_add_number;
2209
2210 if (skip_past_char (&p, ']') == FAIL)
2211 {
2212 as_bad (_("expecting ]"));
2213 return 0;
2214 }
2215 }
2216
2217 namelen = nameend - newname;
2218 namebuf = alloca (namelen + 1);
2219 strncpy (namebuf, newname, namelen);
2220 namebuf[namelen] = '\0';
2221
2222 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2223 typeinfo.defined != 0 ? &typeinfo : NULL);
2224
2225 /* Insert name in all uppercase. */
2226 for (p = namebuf; *p; p++)
2227 *p = TOUPPER (*p);
2228
2229 if (strncmp (namebuf, newname, namelen))
2230 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2231 typeinfo.defined != 0 ? &typeinfo : NULL);
2232
2233 /* Insert name in all lowercase. */
2234 for (p = namebuf; *p; p++)
2235 *p = TOLOWER (*p);
2236
2237 if (strncmp (namebuf, newname, namelen))
2238 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2239 typeinfo.defined != 0 ? &typeinfo : NULL);
2240
2241 return 1;
2242 }
2243
2244 /* Should never be called, as .req goes between the alias and the
2245 register name, not at the beginning of the line. */
2246 static void
2247 s_req (int a ATTRIBUTE_UNUSED)
2248 {
2249 as_bad (_("invalid syntax for .req directive"));
2250 }
2251
2252 static void
2253 s_dn (int a ATTRIBUTE_UNUSED)
2254 {
2255 as_bad (_("invalid syntax for .dn directive"));
2256 }
2257
2258 static void
2259 s_qn (int a ATTRIBUTE_UNUSED)
2260 {
2261 as_bad (_("invalid syntax for .qn directive"));
2262 }
2263
2264 /* The .unreq directive deletes an alias which was previously defined
2265 by .req. For example:
2266
2267 my_alias .req r11
2268 .unreq my_alias */
2269
2270 static void
2271 s_unreq (int a ATTRIBUTE_UNUSED)
2272 {
2273 char * name;
2274 char saved_char;
2275
2276 name = input_line_pointer;
2277
2278 while (*input_line_pointer != 0
2279 && *input_line_pointer != ' '
2280 && *input_line_pointer != '\n')
2281 ++input_line_pointer;
2282
2283 saved_char = *input_line_pointer;
2284 *input_line_pointer = 0;
2285
2286 if (!*name)
2287 as_bad (_("invalid syntax for .unreq directive"));
2288 else
2289 {
2290 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2291
2292 if (!reg)
2293 as_bad (_("unknown register alias '%s'"), name);
2294 else if (reg->builtin)
2295 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2296 name);
2297 else
2298 {
2299 char * p;
2300 char * nbuf;
2301
2302 hash_delete (arm_reg_hsh, name, FALSE);
2303 free ((char *) reg->name);
2304 if (reg->neon)
2305 free (reg->neon);
2306 free (reg);
2307
2308 /* Also locate the all upper case and all lower case versions.
2309 Do not complain if we cannot find one or the other as it
2310 was probably deleted above. */
2311
2312 nbuf = strdup (name);
2313 for (p = nbuf; *p; p++)
2314 *p = TOUPPER (*p);
2315 reg = hash_find (arm_reg_hsh, nbuf);
2316 if (reg)
2317 {
2318 hash_delete (arm_reg_hsh, nbuf, FALSE);
2319 free ((char *) reg->name);
2320 if (reg->neon)
2321 free (reg->neon);
2322 free (reg);
2323 }
2324
2325 for (p = nbuf; *p; p++)
2326 *p = TOLOWER (*p);
2327 reg = hash_find (arm_reg_hsh, nbuf);
2328 if (reg)
2329 {
2330 hash_delete (arm_reg_hsh, nbuf, FALSE);
2331 free ((char *) reg->name);
2332 if (reg->neon)
2333 free (reg->neon);
2334 free (reg);
2335 }
2336
2337 free (nbuf);
2338 }
2339 }
2340
2341 *input_line_pointer = saved_char;
2342 demand_empty_rest_of_line ();
2343 }
2344
2345 /* Directives: Instruction set selection. */
2346
2347 #ifdef OBJ_ELF
2348 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2349 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2350    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2351    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
2352
2353 static enum mstate mapstate = MAP_UNDEFINED;
2354
2355 void
2356 mapping_state (enum mstate state)
2357 {
2358 symbolS * symbolP;
2359 const char * symname;
2360 int type;
2361
2362 if (mapstate == state)
2363 /* The mapping symbol has already been emitted.
2364 There is nothing else to do. */
2365 return;
2366
2367 mapstate = state;
2368
2369 switch (state)
2370 {
2371 case MAP_DATA:
2372 symname = "$d";
2373 type = BSF_NO_FLAGS;
2374 break;
2375 case MAP_ARM:
2376 symname = "$a";
2377 type = BSF_NO_FLAGS;
2378 break;
2379 case MAP_THUMB:
2380 symname = "$t";
2381 type = BSF_NO_FLAGS;
2382 break;
2383 case MAP_UNDEFINED:
2384 return;
2385 default:
2386 abort ();
2387 }
2388
2389 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2390
2391 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2392 symbol_table_insert (symbolP);
2393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2394
2395 switch (state)
2396 {
2397 case MAP_ARM:
2398 THUMB_SET_FUNC (symbolP, 0);
2399 ARM_SET_THUMB (symbolP, 0);
2400 ARM_SET_INTERWORK (symbolP, support_interwork);
2401 break;
2402
2403 case MAP_THUMB:
2404 THUMB_SET_FUNC (symbolP, 1);
2405 ARM_SET_THUMB (symbolP, 1);
2406 ARM_SET_INTERWORK (symbolP, support_interwork);
2407 break;
2408
2409 case MAP_DATA:
2410 default:
2411 return;
2412 }
2413 }
2414 #else
2415 #define mapping_state(x) /* nothing */
2416 #endif
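
/* An illustrative (hypothetical) fragment showing where the mapping
   symbols end up:

       .arm
       add   r0, r0, r1   @ roughly: preceded by a $a mapping symbol
       .thumb
       add   r0, r1       @ roughly: preceded by a $t mapping symbol
       .word 0x1234       @ roughly: preceded by a $d mapping symbol

   The symbols let tools such as disassemblers and the linker tell ARM
   code, Thumb code and inline data apart.  */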
2417
2418 /* Find the real, Thumb encoded start of a Thumb function. */
2419
2420 static symbolS *
2421 find_real_start (symbolS * symbolP)
2422 {
2423 char * real_start;
2424 const char * name = S_GET_NAME (symbolP);
2425 symbolS * new_target;
2426
2427 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2428 #define STUB_NAME ".real_start_of"
2429
2430 if (name == NULL)
2431 abort ();
2432
2433 /* The compiler may generate BL instructions to local labels because
2434 it needs to perform a branch to a far away location. These labels
2435 do not have a corresponding ".real_start_of" label. We check
2436 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2437 the ".real_start_of" convention for nonlocal branches. */
2438 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2439 return symbolP;
2440
2441 real_start = ACONCAT ((STUB_NAME, name, NULL));
2442 new_target = symbol_find (real_start);
2443
2444 if (new_target == NULL)
2445 {
2446 as_warn (_("Failed to find real start of function: %s\n"), name);
2447 new_target = symbolP;
2448 }
2449
2450 return new_target;
2451 }
2452
2453 static void
2454 opcode_select (int width)
2455 {
2456 switch (width)
2457 {
2458 case 16:
2459 if (! thumb_mode)
2460 {
2461 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2462 as_bad (_("selected processor does not support THUMB opcodes"));
2463
2464 thumb_mode = 1;
2465 /* No need to force the alignment, since we will have been
2466 coming from ARM mode, which is word-aligned. */
2467 record_alignment (now_seg, 1);
2468 }
2469 mapping_state (MAP_THUMB);
2470 break;
2471
2472 case 32:
2473 if (thumb_mode)
2474 {
2475 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2476 as_bad (_("selected processor does not support ARM opcodes"));
2477
2478 thumb_mode = 0;
2479
2480 if (!need_pass_2)
2481 frag_align (2, 0, 0);
2482
2483 record_alignment (now_seg, 1);
2484 }
2485 mapping_state (MAP_ARM);
2486 break;
2487
2488 default:
2489 as_bad (_("invalid instruction size selected (%d)"), width);
2490 }
2491 }
2492
2493 static void
2494 s_arm (int ignore ATTRIBUTE_UNUSED)
2495 {
2496 opcode_select (32);
2497 demand_empty_rest_of_line ();
2498 }
2499
2500 static void
2501 s_thumb (int ignore ATTRIBUTE_UNUSED)
2502 {
2503 opcode_select (16);
2504 demand_empty_rest_of_line ();
2505 }
2506
2507 static void
2508 s_code (int unused ATTRIBUTE_UNUSED)
2509 {
2510 int temp;
2511
2512 temp = get_absolute_expression ();
2513 switch (temp)
2514 {
2515 case 16:
2516 case 32:
2517 opcode_select (temp);
2518 break;
2519
2520 default:
2521 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2522 }
2523 }
2524
2525 static void
2526 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2527 {
2528   /* If we are not already in Thumb mode, go into it, EVEN if
2529      the target processor does not support Thumb instructions.
2530      This is used by gcc/config/arm/lib1funcs.asm for example
2531      to compile interworking support functions even if the
2532      target processor does not support interworking.  */
2533 if (! thumb_mode)
2534 {
2535 thumb_mode = 2;
2536 record_alignment (now_seg, 1);
2537 }
2538
2539 demand_empty_rest_of_line ();
2540 }
2541
2542 static void
2543 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2544 {
2545 s_thumb (0);
2546
2547 /* The following label is the name/address of the start of a Thumb function.
2548 We need to know this for the interworking support. */
2549 label_is_thumb_function_name = TRUE;
2550 }
2551
2552 /* Perform a .set directive, but also mark the alias as
2553 being a thumb function. */
2554
2555 static void
2556 s_thumb_set (int equiv)
2557 {
2558   /* XXX the following is a duplicate of the code for s_set() in read.c.
2559      We cannot just call that code, as we need to get at the symbol that
2560      is created.  */
2561 char * name;
2562 char delim;
2563 char * end_name;
2564 symbolS * symbolP;
2565
2566 /* Especial apologies for the random logic:
2567 This just grew, and could be parsed much more simply!
2568 Dean - in haste. */
2569 name = input_line_pointer;
2570 delim = get_symbol_end ();
2571 end_name = input_line_pointer;
2572 *end_name = delim;
2573
2574 if (*input_line_pointer != ',')
2575 {
2576 *end_name = 0;
2577 as_bad (_("expected comma after name \"%s\""), name);
2578 *end_name = delim;
2579 ignore_rest_of_line ();
2580 return;
2581 }
2582
2583 input_line_pointer++;
2584 *end_name = 0;
2585
2586 if (name[0] == '.' && name[1] == '\0')
2587 {
2588 /* XXX - this should not happen to .thumb_set. */
2589 abort ();
2590 }
2591
2592 if ((symbolP = symbol_find (name)) == NULL
2593 && (symbolP = md_undefined_symbol (name)) == NULL)
2594 {
2595 #ifndef NO_LISTING
2596 /* When doing symbol listings, play games with dummy fragments living
2597 outside the normal fragment chain to record the file and line info
2598 for this symbol. */
2599 if (listing & LISTING_SYMBOLS)
2600 {
2601 extern struct list_info_struct * listing_tail;
2602 fragS * dummy_frag = xmalloc (sizeof (fragS));
2603
2604 memset (dummy_frag, 0, sizeof (fragS));
2605 dummy_frag->fr_type = rs_fill;
2606 dummy_frag->line = listing_tail;
2607 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2608 dummy_frag->fr_symbol = symbolP;
2609 }
2610 else
2611 #endif
2612 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2613
2614 #ifdef OBJ_COFF
2615 /* "set" symbols are local unless otherwise specified. */
2616 SF_SET_LOCAL (symbolP);
2617 #endif /* OBJ_COFF */
2618 } /* Make a new symbol. */
2619
2620 symbol_table_insert (symbolP);
2621
2622 * end_name = delim;
2623
2624 if (equiv
2625 && S_IS_DEFINED (symbolP)
2626 && S_GET_SEGMENT (symbolP) != reg_section)
2627 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2628
2629 pseudo_set (symbolP);
2630
2631 demand_empty_rest_of_line ();
2632
2633 /* XXX Now we come to the Thumb specific bit of code. */
2634
2635 THUMB_SET_FUNC (symbolP, 1);
2636 ARM_SET_THUMB (symbolP, 1);
2637 #if defined OBJ_ELF || defined OBJ_COFF
2638 ARM_SET_INTERWORK (symbolP, support_interwork);
2639 #endif
2640 }
2641
2642 /* Directives: Mode selection. */
2643
2644 /* .syntax [unified|divided] - choose the new unified syntax
2645 (same for Arm and Thumb encoding, modulo slight differences in what
2646 can be represented) or the old divergent syntax for each mode. */
2647 static void
2648 s_syntax (int unused ATTRIBUTE_UNUSED)
2649 {
2650 char *name, delim;
2651
2652 name = input_line_pointer;
2653 delim = get_symbol_end ();
2654
2655 if (!strcasecmp (name, "unified"))
2656 unified_syntax = TRUE;
2657 else if (!strcasecmp (name, "divided"))
2658 unified_syntax = FALSE;
2659 else
2660 {
2661 as_bad (_("unrecognized syntax mode \"%s\""), name);
2662 return;
2663 }
2664 *input_line_pointer = delim;
2665 demand_empty_rest_of_line ();
2666 }
2667
2668 /* Directives: sectioning and alignment. */
2669
2670 /* Same as s_align_ptwo but align 0 => align 2. */
2671
2672 static void
2673 s_align (int unused ATTRIBUTE_UNUSED)
2674 {
2675 int temp;
2676 bfd_boolean fill_p;
2677 long temp_fill;
2678 long max_alignment = 15;
2679
2680 temp = get_absolute_expression ();
2681 if (temp > max_alignment)
2682 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2683 else if (temp < 0)
2684 {
2685 as_bad (_("alignment negative. 0 assumed."));
2686 temp = 0;
2687 }
2688
2689 if (*input_line_pointer == ',')
2690 {
2691 input_line_pointer++;
2692 temp_fill = get_absolute_expression ();
2693 fill_p = TRUE;
2694 }
2695 else
2696 {
2697 fill_p = FALSE;
2698 temp_fill = 0;
2699 }
2700
2701 if (!temp)
2702 temp = 2;
2703
2704 /* Only make a frag if we HAVE to. */
2705 if (temp && !need_pass_2)
2706 {
2707 if (!fill_p && subseg_text_p (now_seg))
2708 frag_align_code (temp, 0);
2709 else
2710 frag_align (temp, (int) temp_fill, 0);
2711 }
2712 demand_empty_rest_of_line ();
2713
2714 record_alignment (now_seg, temp);
2715 }
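
/* Illustrative uses of the .align handling above (values follow the code,
   shown only as examples):

       .align 3         @ advance to an 8-byte (2^3) boundary
       .align 0         @ treated as .align 2, i.e. a 4-byte boundary
       .align 2, 0xff   @ 4-byte boundary, padded with 0xff bytes

   When no fill value is given in a text section, frag_align_code is used
   so that the padding can be filled with no-op patterns appropriate to
   the current instruction set rather than plain zero bytes.  */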
2716
2717 static void
2718 s_bss (int ignore ATTRIBUTE_UNUSED)
2719 {
2720   /* We don't support putting frags in the BSS segment; we fake it by
2721      marking in_bss, then looking at s_skip for clues.  */
2722 subseg_set (bss_section, 0);
2723 demand_empty_rest_of_line ();
2724 mapping_state (MAP_DATA);
2725 }
2726
2727 static void
2728 s_even (int ignore ATTRIBUTE_UNUSED)
2729 {
2730 /* Never make frag if expect extra pass. */
2731 if (!need_pass_2)
2732 frag_align (1, 0, 0);
2733
2734 record_alignment (now_seg, 1);
2735
2736 demand_empty_rest_of_line ();
2737 }
2738
2739 /* Directives: Literal pools. */
2740
2741 static literal_pool *
2742 find_literal_pool (void)
2743 {
2744 literal_pool * pool;
2745
2746 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2747 {
2748 if (pool->section == now_seg
2749 && pool->sub_section == now_subseg)
2750 break;
2751 }
2752
2753 return pool;
2754 }
2755
2756 static literal_pool *
2757 find_or_make_literal_pool (void)
2758 {
2759 /* Next literal pool ID number. */
2760 static unsigned int latest_pool_num = 1;
2761 literal_pool * pool;
2762
2763 pool = find_literal_pool ();
2764
2765 if (pool == NULL)
2766 {
2767 /* Create a new pool. */
2768 pool = xmalloc (sizeof (* pool));
2769 if (! pool)
2770 return NULL;
2771
2772 pool->next_free_entry = 0;
2773 pool->section = now_seg;
2774 pool->sub_section = now_subseg;
2775 pool->next = list_of_pools;
2776 pool->symbol = NULL;
2777
2778 /* Add it to the list. */
2779 list_of_pools = pool;
2780 }
2781
2782 /* New pools, and emptied pools, will have a NULL symbol. */
2783 if (pool->symbol == NULL)
2784 {
2785 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2786 (valueT) 0, &zero_address_frag);
2787 pool->id = latest_pool_num ++;
2788 }
2789
2790 /* Done. */
2791 return pool;
2792 }
2793
2794 /* Add the literal in the global 'inst'
2795 structure to the relevant literal pool. */
2796
2797 static int
2798 add_to_lit_pool (void)
2799 {
2800 literal_pool * pool;
2801 unsigned int entry;
2802
2803 pool = find_or_make_literal_pool ();
2804
2805 /* Check if this literal value is already in the pool. */
2806 for (entry = 0; entry < pool->next_free_entry; entry ++)
2807 {
2808 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2809 && (inst.reloc.exp.X_op == O_constant)
2810 && (pool->literals[entry].X_add_number
2811 == inst.reloc.exp.X_add_number)
2812 && (pool->literals[entry].X_unsigned
2813 == inst.reloc.exp.X_unsigned))
2814 break;
2815
2816 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2817 && (inst.reloc.exp.X_op == O_symbol)
2818 && (pool->literals[entry].X_add_number
2819 == inst.reloc.exp.X_add_number)
2820 && (pool->literals[entry].X_add_symbol
2821 == inst.reloc.exp.X_add_symbol)
2822 && (pool->literals[entry].X_op_symbol
2823 == inst.reloc.exp.X_op_symbol))
2824 break;
2825 }
2826
2827 /* Do we need to create a new entry? */
2828 if (entry == pool->next_free_entry)
2829 {
2830 if (entry >= MAX_LITERAL_POOL_SIZE)
2831 {
2832 inst.error = _("literal pool overflow");
2833 return FAIL;
2834 }
2835
2836 pool->literals[entry] = inst.reloc.exp;
2837 pool->next_free_entry += 1;
2838 }
2839
2840 inst.reloc.exp.X_op = O_symbol;
2841 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2842 inst.reloc.exp.X_add_symbol = pool->symbol;
2843
2844 return SUCCESS;
2845 }
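
/* A sketch of how this is typically used: a pseudo-instruction such as

       ldr  r0, =0x12345678

   whose constant cannot be encoded directly ends up here.  The constant
   is appended to (or found in) the current pool, and the instruction's
   relocation expression is rewritten as  pool_symbol + 4 * entry,  so
   that once the pool is dumped the load resolves to a PC-relative word
   within it.  Identical constants share a single slot.  */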
2846
2847 /* Can't use symbol_new here, so have to create a symbol and then at
2848    a later date assign it a value.  That's what these functions do.  */
2849
2850 static void
2851 symbol_locate (symbolS * symbolP,
2852 const char * name, /* It is copied, the caller can modify. */
2853 segT segment, /* Segment identifier (SEG_<something>). */
2854 valueT valu, /* Symbol value. */
2855 fragS * frag) /* Associated fragment. */
2856 {
2857 unsigned int name_length;
2858 char * preserved_copy_of_name;
2859
2860 name_length = strlen (name) + 1; /* +1 for \0. */
2861 obstack_grow (&notes, name, name_length);
2862 preserved_copy_of_name = obstack_finish (&notes);
2863
2864 #ifdef tc_canonicalize_symbol_name
2865 preserved_copy_of_name =
2866 tc_canonicalize_symbol_name (preserved_copy_of_name);
2867 #endif
2868
2869 S_SET_NAME (symbolP, preserved_copy_of_name);
2870
2871 S_SET_SEGMENT (symbolP, segment);
2872 S_SET_VALUE (symbolP, valu);
2873 symbol_clear_list_pointers (symbolP);
2874
2875 symbol_set_frag (symbolP, frag);
2876
2877 /* Link to end of symbol chain. */
2878 {
2879 extern int symbol_table_frozen;
2880
2881 if (symbol_table_frozen)
2882 abort ();
2883 }
2884
2885 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2886
2887 obj_symbol_new_hook (symbolP);
2888
2889 #ifdef tc_symbol_new_hook
2890 tc_symbol_new_hook (symbolP);
2891 #endif
2892
2893 #ifdef DEBUG_SYMS
2894 verify_symbol_chain (symbol_rootP, symbol_lastP);
2895 #endif /* DEBUG_SYMS */
2896 }
2897
2898
2899 static void
2900 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2901 {
2902 unsigned int entry;
2903 literal_pool * pool;
2904 char sym_name[20];
2905
2906 pool = find_literal_pool ();
2907 if (pool == NULL
2908 || pool->symbol == NULL
2909 || pool->next_free_entry == 0)
2910 return;
2911
2912 mapping_state (MAP_DATA);
2913
2914   /* Align the pool, as it holds word-sized entries that will be
2915      accessed with word loads.  Only make a frag if we have to.  */
2916 if (!need_pass_2)
2917 frag_align (2, 0, 0);
2918
2919 record_alignment (now_seg, 2);
2920
2921 sprintf (sym_name, "$$lit_\002%x", pool->id);
2922
2923 symbol_locate (pool->symbol, sym_name, now_seg,
2924 (valueT) frag_now_fix (), frag_now);
2925 symbol_table_insert (pool->symbol);
2926
2927 ARM_SET_THUMB (pool->symbol, thumb_mode);
2928
2929 #if defined OBJ_COFF || defined OBJ_ELF
2930 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2931 #endif
2932
2933 for (entry = 0; entry < pool->next_free_entry; entry ++)
2934 /* First output the expression in the instruction to the pool. */
2935 emit_expr (&(pool->literals[entry]), 4); /* .word */
2936
2937 /* Mark the pool as empty. */
2938 pool->next_free_entry = 0;
2939 pool->symbol = NULL;
2940 }
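
/* Typical usage (illustrative):

       ldr   r0, =0x12345678
       ...
       .ltorg              @ or .pool

   At the .ltorg the pending literals are dumped here: the pool is
   word-aligned, attached to an internal $$lit_ label, and each entry is
   emitted as a .word; the pool is then marked empty so a new one can be
   started later in the same section.  */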
2941
2942 #ifdef OBJ_ELF
2943 /* Forward declarations for functions below, in the MD interface
2944 section. */
2945 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2946 static valueT create_unwind_entry (int);
2947 static void start_unwind_section (const segT, int);
2948 static void add_unwind_opcode (valueT, int);
2949 static void flush_pending_unwind (void);
2950
2951 /* Directives: Data. */
2952
2953 static void
2954 s_arm_elf_cons (int nbytes)
2955 {
2956 expressionS exp;
2957
2958 #ifdef md_flush_pending_output
2959 md_flush_pending_output ();
2960 #endif
2961
2962 if (is_it_end_of_statement ())
2963 {
2964 demand_empty_rest_of_line ();
2965 return;
2966 }
2967
2968 #ifdef md_cons_align
2969 md_cons_align (nbytes);
2970 #endif
2971
2972 mapping_state (MAP_DATA);
2973 do
2974 {
2975 int reloc;
2976 char *base = input_line_pointer;
2977
2978 expression (& exp);
2979
2980 if (exp.X_op != O_symbol)
2981 emit_expr (&exp, (unsigned int) nbytes);
2982 else
2983 {
2984 char *before_reloc = input_line_pointer;
2985 reloc = parse_reloc (&input_line_pointer);
2986 if (reloc == -1)
2987 {
2988 as_bad (_("unrecognized relocation suffix"));
2989 ignore_rest_of_line ();
2990 return;
2991 }
2992 else if (reloc == BFD_RELOC_UNUSED)
2993 emit_expr (&exp, (unsigned int) nbytes);
2994 else
2995 {
2996 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2997 int size = bfd_get_reloc_size (howto);
2998
2999 if (reloc == BFD_RELOC_ARM_PLT32)
3000 {
3001 as_bad (_("(plt) is only valid on branch targets"));
3002 reloc = BFD_RELOC_UNUSED;
3003 size = 0;
3004 }
3005
3006 if (size > nbytes)
3007 as_bad (_("%s relocations do not fit in %d bytes"),
3008 howto->name, nbytes);
3009 else
3010 {
3011 /* We've parsed an expression stopping at O_symbol.
3012 But there may be more expression left now that we
3013 have parsed the relocation marker. Parse it again.
3014 XXX Surely there is a cleaner way to do this. */
3015 char *p = input_line_pointer;
3016 int offset;
3017 char *save_buf = alloca (input_line_pointer - base);
3018 memcpy (save_buf, base, input_line_pointer - base);
3019 memmove (base + (input_line_pointer - before_reloc),
3020 base, before_reloc - base);
3021
3022 input_line_pointer = base + (input_line_pointer-before_reloc);
3023 expression (&exp);
3024 memcpy (base, save_buf, p - base);
3025
3026 offset = nbytes - size;
3027 p = frag_more ((int) nbytes);
3028 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3029 size, &exp, 0, reloc);
3030 }
3031 }
3032 }
3033 }
3034 while (*input_line_pointer++ == ',');
3035
3036 /* Put terminator back into stream. */
3037 input_line_pointer --;
3038 demand_empty_rest_of_line ();
3039 }
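
/* Illustrative use of the relocation-suffix parsing above (the accepted
   suffix names are whatever arm_reloc_hsh provides, e.g. GOT or TARGET1
   under ELF):

       .word	sym(GOT)
       .word	sym(TARGET1)

   The parenthesised word selects the relocation emitted for the value;
   with no suffix the expression is emitted as plain data.  */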
3040
3041
3042 /* Parse a .rel31 directive. */
3043
3044 static void
3045 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3046 {
3047 expressionS exp;
3048 char *p;
3049 valueT highbit;
3050
3051 highbit = 0;
3052 if (*input_line_pointer == '1')
3053 highbit = 0x80000000;
3054 else if (*input_line_pointer != '0')
3055 as_bad (_("expected 0 or 1"));
3056
3057 input_line_pointer++;
3058 if (*input_line_pointer != ',')
3059 as_bad (_("missing comma"));
3060 input_line_pointer++;
3061
3062 #ifdef md_flush_pending_output
3063 md_flush_pending_output ();
3064 #endif
3065
3066 #ifdef md_cons_align
3067 md_cons_align (4);
3068 #endif
3069
3070 mapping_state (MAP_DATA);
3071
3072 expression (&exp);
3073
3074 p = frag_more (4);
3075 md_number_to_chars (p, highbit, 4);
3076 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3077 BFD_RELOC_ARM_PREL31);
3078
3079 demand_empty_rest_of_line ();
3080 }
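
/* So a directive such as (illustrative):

       .rel31  1, routine

   emits a 32-bit word whose bit 31 is taken from the first operand
   (0 or 1) and whose low 31 bits are filled in by an R_ARM_PREL31
   place-relative relocation against 'routine'.  The EABI unwind code
   below uses the same encoding.  */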
3081
3082 /* Directives: AEABI stack-unwind tables. */
3083
3084 /* Parse an unwind_fnstart directive. Simply records the current location. */
3085
3086 static void
3087 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3088 {
3089 demand_empty_rest_of_line ();
3090 /* Mark the start of the function. */
3091 unwind.proc_start = expr_build_dot ();
3092
3093 /* Reset the rest of the unwind info. */
3094 unwind.opcode_count = 0;
3095 unwind.table_entry = NULL;
3096 unwind.personality_routine = NULL;
3097 unwind.personality_index = -1;
3098 unwind.frame_size = 0;
3099 unwind.fp_offset = 0;
3100 unwind.fp_reg = REG_SP;
3101 unwind.fp_used = 0;
3102 unwind.sp_restored = 0;
3103 }
3104
3105
3106 /* Parse a handlerdata directive. Creates the exception handling table entry
3107 for the function. */
3108
3109 static void
3110 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3111 {
3112 demand_empty_rest_of_line ();
3113 if (unwind.table_entry)
3114 as_bad (_("duplicate .handlerdata directive"));
3115
3116 create_unwind_entry (1);
3117 }
3118
3119 /* Parse an unwind_fnend directive. Generates the index table entry. */
3120
3121 static void
3122 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3123 {
3124 long where;
3125 char *ptr;
3126 valueT val;
3127
3128 demand_empty_rest_of_line ();
3129
3130 /* Add eh table entry. */
3131 if (unwind.table_entry == NULL)
3132 val = create_unwind_entry (0);
3133 else
3134 val = 0;
3135
3136 /* Add index table entry. This is two words. */
3137 start_unwind_section (unwind.saved_seg, 1);
3138 frag_align (2, 0, 0);
3139 record_alignment (now_seg, 2);
3140
3141 ptr = frag_more (8);
3142 where = frag_now_fix () - 8;
3143
3144 /* Self relative offset of the function start. */
3145 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3146 BFD_RELOC_ARM_PREL31);
3147
3148 /* Indicate dependency on EHABI-defined personality routines to the
3149 linker, if it hasn't been done already. */
3150 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3151 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3152 {
3153 static const char *const name[] =
3154 {
3155 "__aeabi_unwind_cpp_pr0",
3156 "__aeabi_unwind_cpp_pr1",
3157 "__aeabi_unwind_cpp_pr2"
3158 };
3159 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3160 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3161 marked_pr_dependency |= 1 << unwind.personality_index;
3162 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3163 = marked_pr_dependency;
3164 }
3165
3166 if (val)
3167 /* Inline exception table entry. */
3168 md_number_to_chars (ptr + 4, val, 4);
3169 else
3170 /* Self relative offset of the table entry. */
3171 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3172 BFD_RELOC_ARM_PREL31);
3173
3174 /* Restore the original section. */
3175 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3176 }
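
/* The index table entry built above is two words (layout restated from
   the code, not quoted from the EHABI document):

     word 0:  R_ARM_PREL31 offset to the start of the function
     word 1:  either the inline unwind data returned by
              create_unwind_entry, or an R_ARM_PREL31 offset to the
              separate table entry created by .handlerdata  */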
3177
3178
3179 /* Parse an unwind_cantunwind directive. */
3180
3181 static void
3182 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3183 {
3184 demand_empty_rest_of_line ();
3185 if (unwind.personality_routine || unwind.personality_index != -1)
3186 as_bad (_("personality routine specified for cantunwind frame"));
3187
3188 unwind.personality_index = -2;
3189 }
3190
3191
3192 /* Parse a personalityindex directive. */
3193
3194 static void
3195 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3196 {
3197 expressionS exp;
3198
3199 if (unwind.personality_routine || unwind.personality_index != -1)
3200 as_bad (_("duplicate .personalityindex directive"));
3201
3202 expression (&exp);
3203
3204 if (exp.X_op != O_constant
3205 || exp.X_add_number < 0 || exp.X_add_number > 15)
3206 {
3207 as_bad (_("bad personality routine number"));
3208 ignore_rest_of_line ();
3209 return;
3210 }
3211
3212 unwind.personality_index = exp.X_add_number;
3213
3214 demand_empty_rest_of_line ();
3215 }
3216
3217
3218 /* Parse a personality directive. */
3219
3220 static void
3221 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3222 {
3223 char *name, *p, c;
3224
3225 if (unwind.personality_routine || unwind.personality_index != -1)
3226 as_bad (_("duplicate .personality directive"));
3227
3228 name = input_line_pointer;
3229 c = get_symbol_end ();
3230 p = input_line_pointer;
3231 unwind.personality_routine = symbol_find_or_make (name);
3232 *p = c;
3233 demand_empty_rest_of_line ();
3234 }
3235
3236
3237 /* Parse a directive saving core registers. */
3238
3239 static void
3240 s_arm_unwind_save_core (void)
3241 {
3242 valueT op;
3243 long range;
3244 int n;
3245
3246 range = parse_reg_list (&input_line_pointer);
3247 if (range == FAIL)
3248 {
3249 as_bad (_("expected register list"));
3250 ignore_rest_of_line ();
3251 return;
3252 }
3253
3254 demand_empty_rest_of_line ();
3255
3256 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3257 into .unwind_save {..., sp...}. We aren't bothered about the value of
3258 ip because it is clobbered by calls. */
3259 if (unwind.sp_restored && unwind.fp_reg == 12
3260 && (range & 0x3000) == 0x1000)
3261 {
3262 unwind.opcode_count--;
3263 unwind.sp_restored = 0;
3264 range = (range | 0x2000) & ~0x1000;
3265 unwind.pending_offset = 0;
3266 }
3267
3268 /* Pop r4-r15. */
3269 if (range & 0xfff0)
3270 {
3271 /* See if we can use the short opcodes. These pop a block of up to 8
3272 registers starting with r4, plus maybe r14. */
3273 for (n = 0; n < 8; n++)
3274 {
3275 /* Break at the first non-saved register. */
3276 if ((range & (1 << (n + 4))) == 0)
3277 break;
3278 }
3279 /* See if there are any other bits set. */
3280 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3281 {
3282 /* Use the long form. */
3283 op = 0x8000 | ((range >> 4) & 0xfff);
3284 add_unwind_opcode (op, 2);
3285 }
3286 else
3287 {
3288 /* Use the short form. */
3289 if (range & 0x4000)
3290 op = 0xa8; /* Pop r14. */
3291 else
3292 op = 0xa0; /* Do not pop r14. */
3293 op |= (n - 1);
3294 add_unwind_opcode (op, 1);
3295 }
3296 }
3297
3298 /* Pop r0-r3. */
3299 if (range & 0xf)
3300 {
3301 op = 0xb100 | (range & 0xf);
3302 add_unwind_opcode (op, 2);
3303 }
3304
3305 /* Record the number of bytes pushed. */
3306 for (n = 0; n < 16; n++)
3307 {
3308 if (range & (1 << n))
3309 unwind.frame_size += 4;
3310 }
3311 }
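
/* For instance (values follow the encoding above, shown only as an
   illustration):

       .save {r4-r7, lr}

   has a contiguous run of four registers starting at r4 plus r14, so the
   short form applies: op = 0xa8 | (4 - 1) = 0xab, a single one-byte
   opcode, and frame_size grows by 5 * 4 = 20 bytes.  */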
3312
3313
3314 /* Parse a directive saving FPA registers. */
3315
3316 static void
3317 s_arm_unwind_save_fpa (int reg)
3318 {
3319 expressionS exp;
3320 int num_regs;
3321 valueT op;
3322
3323   /* Get the number of registers to transfer.  */
3324 if (skip_past_comma (&input_line_pointer) != FAIL)
3325 expression (&exp);
3326 else
3327 exp.X_op = O_illegal;
3328
3329 if (exp.X_op != O_constant)
3330 {
3331 as_bad (_("expected , <constant>"));
3332 ignore_rest_of_line ();
3333 return;
3334 }
3335
3336 num_regs = exp.X_add_number;
3337
3338 if (num_regs < 1 || num_regs > 4)
3339 {
3340 as_bad (_("number of registers must be in the range [1:4]"));
3341 ignore_rest_of_line ();
3342 return;
3343 }
3344
3345 demand_empty_rest_of_line ();
3346
3347 if (reg == 4)
3348 {
3349 /* Short form. */
3350 op = 0xb4 | (num_regs - 1);
3351 add_unwind_opcode (op, 1);
3352 }
3353 else
3354 {
3355 /* Long form. */
3356 op = 0xc800 | (reg << 4) | (num_regs - 1);
3357 add_unwind_opcode (op, 2);
3358 }
3359 unwind.frame_size += num_regs * 12;
3360 }
3361
3362
3363 /* Parse a directive saving VFP registers for ARMv6 and above. */
3364
3365 static void
3366 s_arm_unwind_save_vfp_armv6 (void)
3367 {
3368 int count;
3369 unsigned int start;
3370 valueT op;
3371 int num_vfpv3_regs = 0;
3372 int num_regs_below_16;
3373
3374 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3375 if (count == FAIL)
3376 {
3377 as_bad (_("expected register list"));
3378 ignore_rest_of_line ();
3379 return;
3380 }
3381
3382 demand_empty_rest_of_line ();
3383
3384 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3385 than FSTMX/FLDMX-style ones). */
3386
3387 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3388 if (start >= 16)
3389 num_vfpv3_regs = count;
3390 else if (start + count > 16)
3391 num_vfpv3_regs = start + count - 16;
3392
3393 if (num_vfpv3_regs > 0)
3394 {
3395 int start_offset = start > 16 ? start - 16 : 0;
3396 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3397 add_unwind_opcode (op, 2);
3398 }
3399
3400 /* Generate opcode for registers numbered in the range 0 .. 15. */
3401 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3402 assert (num_regs_below_16 + num_vfpv3_regs == count);
3403 if (num_regs_below_16 > 0)
3404 {
3405 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3406 add_unwind_opcode (op, 2);
3407 }
3408
3409 unwind.frame_size += count * 8;
3410 }
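
/* For instance (an illustration of the encoding above):

       .vsave {d8-d11}

   gives start = 8, count = 4 and no VFPv3 registers, so the single
   two-byte opcode 0xc900 | (8 << 4) | (4 - 1) = 0xc983 is added and
   frame_size grows by 4 * 8 = 32 bytes.  */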
3411
3412
3413 /* Parse a directive saving VFP registers for pre-ARMv6. */
3414
3415 static void
3416 s_arm_unwind_save_vfp (void)
3417 {
3418 int count;
3419 unsigned int reg;
3420 valueT op;
3421
3422 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3423 if (count == FAIL)
3424 {
3425 as_bad (_("expected register list"));
3426 ignore_rest_of_line ();
3427 return;
3428 }
3429
3430 demand_empty_rest_of_line ();
3431
3432 if (reg == 8)
3433 {
3434 /* Short form. */
3435 op = 0xb8 | (count - 1);
3436 add_unwind_opcode (op, 1);
3437 }
3438 else
3439 {
3440 /* Long form. */
3441 op = 0xb300 | (reg << 4) | (count - 1);
3442 add_unwind_opcode (op, 2);
3443 }
3444 unwind.frame_size += count * 8 + 4;
3445 }
3446
3447
3448 /* Parse a directive saving iWMMXt data registers. */
3449
3450 static void
3451 s_arm_unwind_save_mmxwr (void)
3452 {
3453 int reg;
3454 int hi_reg;
3455 int i;
3456 unsigned mask = 0;
3457 valueT op;
3458
3459 if (*input_line_pointer == '{')
3460 input_line_pointer++;
3461
3462 do
3463 {
3464 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3465
3466 if (reg == FAIL)
3467 {
3468 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3469 goto error;
3470 }
3471
3472 if (mask >> reg)
3473 as_tsktsk (_("register list not in ascending order"));
3474 mask |= 1 << reg;
3475
3476 if (*input_line_pointer == '-')
3477 {
3478 input_line_pointer++;
3479 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3480 if (hi_reg == FAIL)
3481 {
3482 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3483 goto error;
3484 }
3485 else if (reg >= hi_reg)
3486 {
3487 as_bad (_("bad register range"));
3488 goto error;
3489 }
3490 for (; reg < hi_reg; reg++)
3491 mask |= 1 << reg;
3492 }
3493 }
3494 while (skip_past_comma (&input_line_pointer) != FAIL);
3495
3496 if (*input_line_pointer == '}')
3497 input_line_pointer++;
3498
3499 demand_empty_rest_of_line ();
3500
3501 /* Generate any deferred opcodes because we're going to be looking at
3502 the list. */
3503 flush_pending_unwind ();
3504
3505 for (i = 0; i < 16; i++)
3506 {
3507 if (mask & (1 << i))
3508 unwind.frame_size += 8;
3509 }
3510
3511 /* Attempt to combine with a previous opcode. We do this because gcc
3512 likes to output separate unwind directives for a single block of
3513 registers. */
3514 if (unwind.opcode_count > 0)
3515 {
3516 i = unwind.opcodes[unwind.opcode_count - 1];
3517 if ((i & 0xf8) == 0xc0)
3518 {
3519 i &= 7;
3520 /* Only merge if the blocks are contiguous. */
3521 if (i < 6)
3522 {
3523 if ((mask & 0xfe00) == (1 << 9))
3524 {
3525 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3526 unwind.opcode_count--;
3527 }
3528 }
3529 else if (i == 6 && unwind.opcode_count >= 2)
3530 {
3531 i = unwind.opcodes[unwind.opcode_count - 2];
3532 reg = i >> 4;
3533 i &= 0xf;
3534
3535 op = 0xffff << (reg - 1);
3536 if (reg > 0
3537 && ((mask & op) == (1u << (reg - 1))))
3538 {
3539 op = (1 << (reg + i + 1)) - 1;
3540 op &= ~((1 << reg) - 1);
3541 mask |= op;
3542 unwind.opcode_count -= 2;
3543 }
3544 }
3545 }
3546 }
3547
3548 hi_reg = 15;
3549   /* We want to generate opcodes in the order the registers have been
3550      saved, i.e. descending order.  */
3551 for (reg = 15; reg >= -1; reg--)
3552 {
3553 /* Save registers in blocks. */
3554 if (reg < 0
3555 || !(mask & (1 << reg)))
3556 {
3557 /* We found an unsaved reg. Generate opcodes to save the
3558 preceding block. */
3559 if (reg != hi_reg)
3560 {
3561 if (reg == 9)
3562 {
3563 /* Short form. */
3564 op = 0xc0 | (hi_reg - 10);
3565 add_unwind_opcode (op, 1);
3566 }
3567 else
3568 {
3569 /* Long form. */
3570 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3571 add_unwind_opcode (op, 2);
3572 }
3573 }
3574 hi_reg = reg - 1;
3575 }
3576 }
3577
3578 return;
3579 error:
3580 ignore_rest_of_line ();
3581 }
3582
3583 static void
3584 s_arm_unwind_save_mmxwcg (void)
3585 {
3586 int reg;
3587 int hi_reg;
3588 unsigned mask = 0;
3589 valueT op;
3590
3591 if (*input_line_pointer == '{')
3592 input_line_pointer++;
3593
3594 do
3595 {
3596 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3597
3598 if (reg == FAIL)
3599 {
3600 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3601 goto error;
3602 }
3603
3604 reg -= 8;
3605 if (mask >> reg)
3606 as_tsktsk (_("register list not in ascending order"));
3607 mask |= 1 << reg;
3608
3609 if (*input_line_pointer == '-')
3610 {
3611 input_line_pointer++;
3612 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3613 if (hi_reg == FAIL)
3614 {
3615 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3616 goto error;
3617 }
3618 else if (reg >= hi_reg)
3619 {
3620 as_bad (_("bad register range"));
3621 goto error;
3622 }
3623 for (; reg < hi_reg; reg++)
3624 mask |= 1 << reg;
3625 }
3626 }
3627 while (skip_past_comma (&input_line_pointer) != FAIL);
3628
3629 if (*input_line_pointer == '}')
3630 input_line_pointer++;
3631
3632 demand_empty_rest_of_line ();
3633
3634 /* Generate any deferred opcodes because we're going to be looking at
3635 the list. */
3636 flush_pending_unwind ();
3637
3638 for (reg = 0; reg < 16; reg++)
3639 {
3640 if (mask & (1 << reg))
3641 unwind.frame_size += 4;
3642 }
3643 op = 0xc700 | mask;
3644 add_unwind_opcode (op, 2);
3645 return;
3646 error:
3647 ignore_rest_of_line ();
3648 }
3649
3650
3651 /* Parse an unwind_save directive.
3652 If the argument is non-zero, this is a .vsave directive. */
3653
3654 static void
3655 s_arm_unwind_save (int arch_v6)
3656 {
3657 char *peek;
3658 struct reg_entry *reg;
3659 bfd_boolean had_brace = FALSE;
3660
3661 /* Figure out what sort of save we have. */
3662 peek = input_line_pointer;
3663
3664 if (*peek == '{')
3665 {
3666 had_brace = TRUE;
3667 peek++;
3668 }
3669
3670 reg = arm_reg_parse_multi (&peek);
3671
3672 if (!reg)
3673 {
3674 as_bad (_("register expected"));
3675 ignore_rest_of_line ();
3676 return;
3677 }
3678
3679 switch (reg->type)
3680 {
3681 case REG_TYPE_FN:
3682 if (had_brace)
3683 {
3684 as_bad (_("FPA .unwind_save does not take a register list"));
3685 ignore_rest_of_line ();
3686 return;
3687 }
3688 input_line_pointer = peek;
3689 s_arm_unwind_save_fpa (reg->number);
3690 return;
3691
3692 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3693 case REG_TYPE_VFD:
3694 if (arch_v6)
3695 s_arm_unwind_save_vfp_armv6 ();
3696 else
3697 s_arm_unwind_save_vfp ();
3698 return;
3699 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3700 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3701
3702 default:
3703 as_bad (_(".unwind_save does not support this kind of register"));
3704 ignore_rest_of_line ();
3705 }
3706 }
3707
3708
3709 /* Parse an unwind_movsp directive. */
3710
3711 static void
3712 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3713 {
3714 int reg;
3715 valueT op;
3716 int offset;
3717
3718 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3719 if (reg == FAIL)
3720 {
3721 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
3722 ignore_rest_of_line ();
3723 return;
3724 }
3725
3726 /* Optional constant. */
3727 if (skip_past_comma (&input_line_pointer) != FAIL)
3728 {
3729 if (immediate_for_directive (&offset) == FAIL)
3730 return;
3731 }
3732 else
3733 offset = 0;
3734
3735 demand_empty_rest_of_line ();
3736
3737 if (reg == REG_SP || reg == REG_PC)
3738 {
3739 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3740 return;
3741 }
3742
3743 if (unwind.fp_reg != REG_SP)
3744 as_bad (_("unexpected .unwind_movsp directive"));
3745
3746 /* Generate opcode to restore the value. */
3747 op = 0x90 | reg;
3748 add_unwind_opcode (op, 1);
3749
3750 /* Record the information for later. */
3751 unwind.fp_reg = reg;
3752 unwind.fp_offset = unwind.frame_size - offset;
3753 unwind.sp_restored = 1;
3754 }
3755
3756 /* Parse an unwind_pad directive. */
3757
3758 static void
3759 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3760 {
3761 int offset;
3762
3763 if (immediate_for_directive (&offset) == FAIL)
3764 return;
3765
3766 if (offset & 3)
3767 {
3768 as_bad (_("stack increment must be multiple of 4"));
3769 ignore_rest_of_line ();
3770 return;
3771 }
3772
3773 /* Don't generate any opcodes, just record the details for later. */
3774 unwind.frame_size += offset;
3775 unwind.pending_offset += offset;
3776
3777 demand_empty_rest_of_line ();
3778 }
3779
3780 /* Parse an unwind_setfp directive. */
3781
3782 static void
3783 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3784 {
3785 int sp_reg;
3786 int fp_reg;
3787 int offset;
3788
3789 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3790 if (skip_past_comma (&input_line_pointer) == FAIL)
3791 sp_reg = FAIL;
3792 else
3793 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3794
3795 if (fp_reg == FAIL || sp_reg == FAIL)
3796 {
3797 as_bad (_("expected <reg>, <reg>"));
3798 ignore_rest_of_line ();
3799 return;
3800 }
3801
3802 /* Optional constant. */
3803 if (skip_past_comma (&input_line_pointer) != FAIL)
3804 {
3805 if (immediate_for_directive (&offset) == FAIL)
3806 return;
3807 }
3808 else
3809 offset = 0;
3810
3811 demand_empty_rest_of_line ();
3812
3813 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
3814 {
3815 as_bad (_("register must be either sp or set by a previous"
3816 		" unwind_movsp directive"));
3817 return;
3818 }
3819
3820 /* Don't generate any opcodes, just record the information for later. */
3821 unwind.fp_reg = fp_reg;
3822 unwind.fp_used = 1;
3823 if (sp_reg == REG_SP)
3824 unwind.fp_offset = unwind.frame_size - offset;
3825 else
3826 unwind.fp_offset -= offset;
3827 }
3828
3829 /* Parse an unwind_raw directive. */
3830
3831 static void
3832 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3833 {
3834 expressionS exp;
3835 /* This is an arbitrary limit. */
3836 unsigned char op[16];
3837 int count;
3838
3839 expression (&exp);
3840 if (exp.X_op == O_constant
3841 && skip_past_comma (&input_line_pointer) != FAIL)
3842 {
3843 unwind.frame_size += exp.X_add_number;
3844 expression (&exp);
3845 }
3846 else
3847 exp.X_op = O_illegal;
3848
3849 if (exp.X_op != O_constant)
3850 {
3851 as_bad (_("expected <offset>, <opcode>"));
3852 ignore_rest_of_line ();
3853 return;
3854 }
3855
3856 count = 0;
3857
3858 /* Parse the opcode. */
3859 for (;;)
3860 {
3861       if (count >= 16)
3862 	{
3863 	  as_bad (_("unwind opcode too long"));
3864 	  ignore_rest_of_line ();
	  return;
3865 	}
3866 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3867 {
3868 as_bad (_("invalid unwind opcode"));
3869 ignore_rest_of_line ();
3870 return;
3871 }
3872 op[count++] = exp.X_add_number;
3873
3874 /* Parse the next byte. */
3875 if (skip_past_comma (&input_line_pointer) == FAIL)
3876 break;
3877
3878 expression (&exp);
3879 }
3880
3881 /* Add the opcode bytes in reverse order. */
3882 while (count--)
3883 add_unwind_opcode (op[count], 1);
3884
3885 demand_empty_rest_of_line ();
3886 }
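
/* Illustrative use of the directive parsed above:

       .unwind_raw 4, 0xb1, 0x01

   declares that the frame grew by 4 bytes and supplies the raw unwind
   opcode bytes 0xb1 0x01 directly; each byte must fit in 0..0xff and at
   most 16 bytes are accepted.  */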
3887
3888
3889 /* Parse a .eabi_attribute directive. */
3890
3891 static void
3892 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3893 {
3894 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
3895
3896 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
3897 attributes_set_explicitly[tag] = 1;
3898 }
3899 #endif /* OBJ_ELF */
3900
3901 static void s_arm_arch (int);
3902 static void s_arm_object_arch (int);
3903 static void s_arm_cpu (int);
3904 static void s_arm_fpu (int);
3905
3906 #ifdef TE_PE
3907
3908 static void
3909 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3910 {
3911 expressionS exp;
3912
3913 do
3914 {
3915 expression (&exp);
3916 if (exp.X_op == O_symbol)
3917 exp.X_op = O_secrel;
3918
3919 emit_expr (&exp, 4);
3920 }
3921 while (*input_line_pointer++ == ',');
3922
3923 input_line_pointer--;
3924 demand_empty_rest_of_line ();
3925 }
3926 #endif /* TE_PE */
3927
3928 /* This table describes all the machine specific pseudo-ops the assembler
3929 has to support. The fields are:
3930 pseudo-op name without dot
3931 function to call to execute this pseudo-op
3932 Integer arg to pass to the function. */
3933
3934 const pseudo_typeS md_pseudo_table[] =
3935 {
3936 /* Never called because '.req' does not start a line. */
3937 { "req", s_req, 0 },
3938 /* Following two are likewise never called. */
3939 { "dn", s_dn, 0 },
3940 { "qn", s_qn, 0 },
3941 { "unreq", s_unreq, 0 },
3942 { "bss", s_bss, 0 },
3943 { "align", s_align, 0 },
3944 { "arm", s_arm, 0 },
3945 { "thumb", s_thumb, 0 },
3946 { "code", s_code, 0 },
3947 { "force_thumb", s_force_thumb, 0 },
3948 { "thumb_func", s_thumb_func, 0 },
3949 { "thumb_set", s_thumb_set, 0 },
3950 { "even", s_even, 0 },
3951 { "ltorg", s_ltorg, 0 },
3952 { "pool", s_ltorg, 0 },
3953 { "syntax", s_syntax, 0 },
3954 { "cpu", s_arm_cpu, 0 },
3955 { "arch", s_arm_arch, 0 },
3956 { "object_arch", s_arm_object_arch, 0 },
3957 { "fpu", s_arm_fpu, 0 },
3958 #ifdef OBJ_ELF
3959 { "word", s_arm_elf_cons, 4 },
3960 { "long", s_arm_elf_cons, 4 },
3961 { "rel31", s_arm_rel31, 0 },
3962 { "fnstart", s_arm_unwind_fnstart, 0 },
3963 { "fnend", s_arm_unwind_fnend, 0 },
3964 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3965 { "personality", s_arm_unwind_personality, 0 },
3966 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3967 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3968 { "save", s_arm_unwind_save, 0 },
3969 { "vsave", s_arm_unwind_save, 1 },
3970 { "movsp", s_arm_unwind_movsp, 0 },
3971 { "pad", s_arm_unwind_pad, 0 },
3972 { "setfp", s_arm_unwind_setfp, 0 },
3973 { "unwind_raw", s_arm_unwind_raw, 0 },
3974 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3975 #else
3976 { "word", cons, 4},
3977
3978 /* These are used for dwarf. */
3979 {"2byte", cons, 2},
3980 {"4byte", cons, 4},
3981 {"8byte", cons, 8},
3982 /* These are used for dwarf2. */
3983 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3984 { "loc", dwarf2_directive_loc, 0 },
3985 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3986 #endif
3987 { "extend", float_cons, 'x' },
3988 { "ldouble", float_cons, 'x' },
3989 { "packed", float_cons, 'p' },
3990 #ifdef TE_PE
3991 {"secrel32", pe_directive_secrel, 0},
3992 #endif
3993 { 0, 0, 0 }
3994 };
3995 \f
3996 /* Parser functions used exclusively in instruction operands. */
3997
3998 /* Generic immediate-value read function for use in insn parsing.
3999 STR points to the beginning of the immediate (the leading #);
4000 VAL receives the value; if the value is outside [MIN, MAX]
4001 issue an error. PREFIX_OPT is true if the immediate prefix is
4002 optional. */
4003
4004 static int
4005 parse_immediate (char **str, int *val, int min, int max,
4006 bfd_boolean prefix_opt)
4007 {
4008 expressionS exp;
4009 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4010 if (exp.X_op != O_constant)
4011 {
4012 inst.error = _("constant expression required");
4013 return FAIL;
4014 }
4015
4016 if (exp.X_add_number < min || exp.X_add_number > max)
4017 {
4018 inst.error = _("immediate value out of range");
4019 return FAIL;
4020 }
4021
4022 *val = exp.X_add_number;
4023 return SUCCESS;
4024 }
4025
4026 /* Less-generic immediate-value read function with the possibility of loading a
4027 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4028 instructions. Puts the result directly in inst.operands[i]. */
4029
4030 static int
4031 parse_big_immediate (char **str, int i)
4032 {
4033 expressionS exp;
4034 char *ptr = *str;
4035
4036 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4037
4038 if (exp.X_op == O_constant)
4039 {
4040 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4041 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4042 O_constant. We have to be careful not to break compilation for
4043 32-bit X_add_number, though. */
4044 if ((exp.X_add_number & ~0xffffffffl) != 0)
4045 {
4046 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4047 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4048 inst.operands[i].regisimm = 1;
4049 }
4050 }
4051 else if (exp.X_op == O_big
4052 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4053 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4054 {
4055 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4056 /* Bignums have their least significant bits in
4057 generic_bignum[0]. Make sure we put 32 bits in imm and
4058 32 bits in reg, in a (hopefully) portable way. */
4059 assert (parts != 0);
4060 inst.operands[i].imm = 0;
4061 for (j = 0; j < parts; j++, idx++)
4062 inst.operands[i].imm |= generic_bignum[idx]
4063 << (LITTLENUM_NUMBER_OF_BITS * j);
4064 inst.operands[i].reg = 0;
4065 for (j = 0; j < parts; j++, idx++)
4066 inst.operands[i].reg |= generic_bignum[idx]
4067 << (LITTLENUM_NUMBER_OF_BITS * j);
4068 inst.operands[i].regisimm = 1;
4069 }
4070 else
4071 return FAIL;
4072
4073 *str = ptr;
4074
4075 return SUCCESS;
4076 }
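
/* To summarise the layout used above: for a 64-bit value the low 32 bits
   end up in inst.operands[i].imm and the high 32 bits in
   inst.operands[i].reg, with regisimm set to flag that .reg holds
   immediate bits rather than a register number.  A plain 32-bit constant
   only fills .imm.  */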
4077
4078 /* Returns the pseudo-register number of an FPA immediate constant,
4079 or FAIL if there isn't a valid constant here. */
4080
4081 static int
4082 parse_fpa_immediate (char ** str)
4083 {
4084 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4085 char * save_in;
4086 expressionS exp;
4087 int i;
4088 int j;
4089
4090   /* First try to match exact strings; this guarantees that some
4091      formats will work even for cross assembly.  */
4092
4093 for (i = 0; fp_const[i]; i++)
4094 {
4095 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4096 {
4097 char *start = *str;
4098
4099 *str += strlen (fp_const[i]);
4100 if (is_end_of_line[(unsigned char) **str])
4101 return i + 8;
4102 *str = start;
4103 }
4104 }
4105
4106 /* Just because we didn't get a match doesn't mean that the constant
4107 isn't valid, just that it is in a format that we don't
4108 automatically recognize. Try parsing it with the standard
4109 expression routines. */
4110
4111 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4112
4113 /* Look for a raw floating point number. */
4114 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4115 && is_end_of_line[(unsigned char) *save_in])
4116 {
4117 for (i = 0; i < NUM_FLOAT_VALS; i++)
4118 {
4119 for (j = 0; j < MAX_LITTLENUMS; j++)
4120 {
4121 if (words[j] != fp_values[i][j])
4122 break;
4123 }
4124
4125 if (j == MAX_LITTLENUMS)
4126 {
4127 *str = save_in;
4128 return i + 8;
4129 }
4130 }
4131 }
4132
4133   /* Try to parse a more complex expression; this will probably fail
4134      unless the code uses a floating point prefix (e.g. "0f").  */
4135 save_in = input_line_pointer;
4136 input_line_pointer = *str;
4137 if (expression (&exp) == absolute_section
4138 && exp.X_op == O_big
4139 && exp.X_add_number < 0)
4140 {
4141 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4142 Ditto for 15. */
4143 if (gen_to_words (words, 5, (long) 15) == 0)
4144 {
4145 for (i = 0; i < NUM_FLOAT_VALS; i++)
4146 {
4147 for (j = 0; j < MAX_LITTLENUMS; j++)
4148 {
4149 if (words[j] != fp_values[i][j])
4150 break;
4151 }
4152
4153 if (j == MAX_LITTLENUMS)
4154 {
4155 *str = input_line_pointer;
4156 input_line_pointer = save_in;
4157 return i + 8;
4158 }
4159 }
4160 }
4161 }
4162
4163 *str = input_line_pointer;
4164 input_line_pointer = save_in;
4165 inst.error = _("invalid FPA immediate expression");
4166 return FAIL;
4167 }
4168
4169 /* Returns 1 if a number has "quarter-precision" float format
4170 0baBbbbbbc defgh000 00000000 00000000. */
4171
4172 static int
4173 is_quarter_float (unsigned imm)
4174 {
4175 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4176 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4177 }
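
/* Restating the test above: the sign bit (a), bit 24 (c) and bits 23-19
   (defgh) may take any value; bits 30-25 must be either 011111 or 100000,
   i.e. bit 30 is the complement of bits 29-25, which must all be equal;
   and the remaining low 19 mantissa bits must be zero.  */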
4178
4179 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4180 0baBbbbbbc defgh000 00000000 00000000.
4181 The zero and minus-zero cases need special handling, since they can't be
4182 encoded in the "quarter-precision" float format, but can nonetheless be
4183 loaded as integer constants. */
4184
4185 static unsigned
4186 parse_qfloat_immediate (char **ccp, int *immed)
4187 {
4188 char *str = *ccp;
4189 char *fpnum;
4190 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4191 int found_fpchar = 0;
4192
4193 skip_past_char (&str, '#');
4194
4195 /* We must not accidentally parse an integer as a floating-point number. Make
4196 sure that the value we parse is not an integer by checking for special
4197 characters '.' or 'e'.
4198 FIXME: This is a horrible hack, but doing better is tricky because type
4199 information isn't in a very usable state at parse time. */
4200 fpnum = str;
4201 skip_whitespace (fpnum);
4202
4203 if (strncmp (fpnum, "0x", 2) == 0)
4204 return FAIL;
4205 else
4206 {
4207 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4208 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4209 {
4210 found_fpchar = 1;
4211 break;
4212 }
4213
4214 if (!found_fpchar)
4215 return FAIL;
4216 }
4217
4218 if ((str = atof_ieee (str, 's', words)) != NULL)
4219 {
4220 unsigned fpword = 0;
4221 int i;
4222
4223 /* Our FP word must be 32 bits (single-precision FP). */
4224 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4225 {
4226 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4227 fpword |= words[i];
4228 }
4229
4230 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4231 *immed = fpword;
4232 else
4233 return FAIL;
4234
4235 *ccp = str;
4236
4237 return SUCCESS;
4238 }
4239
4240 return FAIL;
4241 }
4242
4243 /* Shift operands. */
4244 enum shift_kind
4245 {
4246 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4247 };
4248
4249 struct asm_shift_name
4250 {
4251 const char *name;
4252 enum shift_kind kind;
4253 };
4254
4255 /* Third argument to parse_shift. */
4256 enum parse_shift_mode
4257 {
4258 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4259 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4260 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4261 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4262 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4263 };
4264
4265 /* Parse a <shift> specifier on an ARM data processing instruction.
4266 This has three forms:
4267
4268 (LSL|LSR|ASL|ASR|ROR) Rs
4269 (LSL|LSR|ASL|ASR|ROR) #imm
4270 RRX
4271
4272 Note that ASL is assimilated to LSL in the instruction encoding, and
4273 RRX to ROR #0 (which cannot be written as such). */
4274
4275 static int
4276 parse_shift (char **str, int i, enum parse_shift_mode mode)
4277 {
4278 const struct asm_shift_name *shift_name;
4279 enum shift_kind shift;
4280 char *s = *str;
4281 char *p = s;
4282 int reg;
4283
4284 for (p = *str; ISALPHA (*p); p++)
4285 ;
4286
4287 if (p == *str)
4288 {
4289 inst.error = _("shift expression expected");
4290 return FAIL;
4291 }
4292
4293 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4294
4295 if (shift_name == NULL)
4296 {
4297 inst.error = _("shift expression expected");
4298 return FAIL;
4299 }
4300
4301 shift = shift_name->kind;
4302
4303 switch (mode)
4304 {
4305 case NO_SHIFT_RESTRICT:
4306 case SHIFT_IMMEDIATE: break;
4307
4308 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4309 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4310 {
4311 inst.error = _("'LSL' or 'ASR' required");
4312 return FAIL;
4313 }
4314 break;
4315
4316 case SHIFT_LSL_IMMEDIATE:
4317 if (shift != SHIFT_LSL)
4318 {
4319 inst.error = _("'LSL' required");
4320 return FAIL;
4321 }
4322 break;
4323
4324 case SHIFT_ASR_IMMEDIATE:
4325 if (shift != SHIFT_ASR)
4326 {
4327 inst.error = _("'ASR' required");
4328 return FAIL;
4329 }
4330 break;
4331
4332 default: abort ();
4333 }
4334
4335 if (shift != SHIFT_RRX)
4336 {
4337 /* Whitespace can appear here if the next thing is a bare digit. */
4338 skip_whitespace (p);
4339
4340 if (mode == NO_SHIFT_RESTRICT
4341 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4342 {
4343 inst.operands[i].imm = reg;
4344 inst.operands[i].immisreg = 1;
4345 }
4346 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4347 return FAIL;
4348 }
4349 inst.operands[i].shift_kind = shift;
4350 inst.operands[i].shifted = 1;
4351 *str = p;
4352 return SUCCESS;
4353 }
4354
4355 /* Parse a <shifter_operand> for an ARM data processing instruction:
4356
4357 #<immediate>
4358 #<immediate>, <rotate>
4359 <Rm>
4360 <Rm>, <shift>
4361
4362 where <shift> is defined by parse_shift above, and <rotate> is a
4363 multiple of 2 between 0 and 30. Validation of immediate operands
4364 is deferred to md_apply_fix. */
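     /* For example, "#255, 8" denotes the constant 255 rotated right by 8,
        i.e. 0xff000000, while "r1, lsl #2" is a register operand with an
        immediate shift. */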
4365
4366 static int
4367 parse_shifter_operand (char **str, int i)
4368 {
4369 int value;
4370 expressionS expr;
4371
4372 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4373 {
4374 inst.operands[i].reg = value;
4375 inst.operands[i].isreg = 1;
4376
4377 /* parse_shift will override this if appropriate */
4378 inst.reloc.exp.X_op = O_constant;
4379 inst.reloc.exp.X_add_number = 0;
4380
4381 if (skip_past_comma (str) == FAIL)
4382 return SUCCESS;
4383
4384 /* Shift operation on register. */
4385 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4386 }
4387
4388 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4389 return FAIL;
4390
4391 if (skip_past_comma (str) == SUCCESS)
4392 {
4393       /* #x, y -- i.e. explicit rotation by Y. */
4394 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4395 return FAIL;
4396
4397 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4398 {
4399 inst.error = _("constant expression expected");
4400 return FAIL;
4401 }
4402
4403 value = expr.X_add_number;
4404 if (value < 0 || value > 30 || value % 2 != 0)
4405 {
4406 inst.error = _("invalid rotation");
4407 return FAIL;
4408 }
4409 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4410 {
4411 inst.error = _("invalid constant");
4412 return FAIL;
4413 }
4414
4415 /* Convert to decoded value. md_apply_fix will put it back. */
4416 inst.reloc.exp.X_add_number
4417 = (((inst.reloc.exp.X_add_number << (32 - value))
4418 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4419 }
4420
4421 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4422 inst.reloc.pc_rel = 0;
4423 return SUCCESS;
4424 }
4425
4426 /* Group relocation information. Each entry in the table contains the
4427    textual name of the relocation as it may appear in assembler source,
4428    where it must be followed by a colon.
4429 Along with this textual name are the relocation codes to be used if
4430 the corresponding instruction is an ALU instruction (ADD or SUB only),
4431 an LDR, an LDRS, or an LDC. */
4432
4433 struct group_reloc_table_entry
4434 {
4435 const char *name;
4436 int alu_code;
4437 int ldr_code;
4438 int ldrs_code;
4439 int ldc_code;
4440 };
4441
4442 typedef enum
4443 {
4444 /* Varieties of non-ALU group relocation. */
4445
4446 GROUP_LDR,
4447 GROUP_LDRS,
4448 GROUP_LDC
4449 } group_reloc_type;
4450
4451 static struct group_reloc_table_entry group_reloc_table[] =
4452 { /* Program counter relative: */
4453 { "pc_g0_nc",
4454 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4455 0, /* LDR */
4456 0, /* LDRS */
4457 0 }, /* LDC */
4458 { "pc_g0",
4459 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4460 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4461 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4462 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4463 { "pc_g1_nc",
4464 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4465 0, /* LDR */
4466 0, /* LDRS */
4467 0 }, /* LDC */
4468 { "pc_g1",
4469 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4470 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4471 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4472 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4473 { "pc_g2",
4474 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4475 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4476 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4477 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4478 /* Section base relative */
4479 { "sb_g0_nc",
4480 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4481 0, /* LDR */
4482 0, /* LDRS */
4483 0 }, /* LDC */
4484 { "sb_g0",
4485 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4486 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4487 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4488 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4489 { "sb_g1_nc",
4490 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4491 0, /* LDR */
4492 0, /* LDRS */
4493 0 }, /* LDC */
4494 { "sb_g1",
4495 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4496 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4497 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4498 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4499 { "sb_g2",
4500 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4501 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4502 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4503 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4504
4505 /* Given the address of a pointer pointing to the textual name of a group
4506 relocation as may appear in assembler source, attempt to find its details
4507 in group_reloc_table. The pointer will be updated to the character after
4508 the trailing colon. On failure, FAIL will be returned; SUCCESS
4509 otherwise. On success, *entry will be updated to point at the relevant
4510 group_reloc_table entry. */
4511
4512 static int
4513 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4514 {
4515 unsigned int i;
4516 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4517 {
4518 int length = strlen (group_reloc_table[i].name);
4519
4520 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4521 && (*str)[length] == ':')
4522 {
4523 *out = &group_reloc_table[i];
4524 *str += (length + 1);
4525 return SUCCESS;
4526 }
4527 }
4528
4529 return FAIL;
4530 }
4531
4532 /* Parse a <shifter_operand> for an ARM data processing instruction
4533 (as for parse_shifter_operand) where group relocations are allowed:
4534
4535 #<immediate>
4536 #<immediate>, <rotate>
4537 #:<group_reloc>:<expression>
4538 <Rm>
4539 <Rm>, <shift>
4540
4541 where <group_reloc> is one of the strings defined in group_reloc_table.
4542    The leading '#' (hash) characters are optional.
4543
4544 Everything else is as for parse_shifter_operand. */
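     /* For example, an instruction along the lines of
        "add r0, r0, #:pc_g0_nc:(sym)" selects the "pc_g0_nc" entry and
        applies its ALU relocation code to the expression (sym). */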
4545
4546 static parse_operand_result
4547 parse_shifter_operand_group_reloc (char **str, int i)
4548 {
4549 /* Determine if we have the sequence of characters #: or just :
4550 coming next. If we do, then we check for a group relocation.
4551 If we don't, punt the whole lot to parse_shifter_operand. */
4552
4553 if (((*str)[0] == '#' && (*str)[1] == ':')
4554 || (*str)[0] == ':')
4555 {
4556 struct group_reloc_table_entry *entry;
4557
4558 if ((*str)[0] == '#')
4559 (*str) += 2;
4560 else
4561 (*str)++;
4562
4563 /* Try to parse a group relocation. Anything else is an error. */
4564 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4565 {
4566 inst.error = _("unknown group relocation");
4567 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4568 }
4569
4570 /* We now have the group relocation table entry corresponding to
4571 the name in the assembler source. Next, we parse the expression. */
4572 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4573 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4574
4575 /* Record the relocation type (always the ALU variant here). */
4576 inst.reloc.type = entry->alu_code;
4577 assert (inst.reloc.type != 0);
4578
4579 return PARSE_OPERAND_SUCCESS;
4580 }
4581 else
4582 return parse_shifter_operand (str, i) == SUCCESS
4583 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4584
4585 /* Never reached. */
4586 }
4587
4588 /* Parse all forms of an ARM address expression. Information is written
4589 to inst.operands[i] and/or inst.reloc.
4590
4591 Preindexed addressing (.preind=1):
4592
4593 [Rn, #offset] .reg=Rn .reloc.exp=offset
4594 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4595 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4596 .shift_kind=shift .reloc.exp=shift_imm
4597
4598 These three may have a trailing ! which causes .writeback to be set also.
4599
4600 Postindexed addressing (.postind=1, .writeback=1):
4601
4602 [Rn], #offset .reg=Rn .reloc.exp=offset
4603 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4604 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4605 .shift_kind=shift .reloc.exp=shift_imm
4606
4607 Unindexed addressing (.preind=0, .postind=0):
4608
4609 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4610
4611 Other:
4612
4613 [Rn]{!} shorthand for [Rn,#0]{!}
4614 =immediate .isreg=0 .reloc.exp=immediate
4615 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4616
4617 It is the caller's responsibility to check for addressing modes not
4618 supported by the instruction, and to set inst.reloc.type. */
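     /* For example, "[r0, r1, lsl #2]!" gives .reg=r0, .imm=r1, .immisreg=1,
        .shift_kind=LSL with a shift_imm of 2 and .writeback=1, whereas
        "[r2], #4" gives .reg=r2, .postind=1, .writeback=1 with 4 in
        .reloc.exp. */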
4619
4620 static parse_operand_result
4621 parse_address_main (char **str, int i, int group_relocations,
4622 group_reloc_type group_type)
4623 {
4624 char *p = *str;
4625 int reg;
4626
4627 if (skip_past_char (&p, '[') == FAIL)
4628 {
4629 if (skip_past_char (&p, '=') == FAIL)
4630 {
4631 /* bare address - translate to PC-relative offset */
4632 inst.reloc.pc_rel = 1;
4633 inst.operands[i].reg = REG_PC;
4634 inst.operands[i].isreg = 1;
4635 inst.operands[i].preind = 1;
4636 }
4637 /* else a load-constant pseudo op, no special treatment needed here */
4638
4639 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4640 return PARSE_OPERAND_FAIL;
4641
4642 *str = p;
4643 return PARSE_OPERAND_SUCCESS;
4644 }
4645
4646 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4647 {
4648 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4649 return PARSE_OPERAND_FAIL;
4650 }
4651 inst.operands[i].reg = reg;
4652 inst.operands[i].isreg = 1;
4653
4654 if (skip_past_comma (&p) == SUCCESS)
4655 {
4656 inst.operands[i].preind = 1;
4657
4658 if (*p == '+') p++;
4659 else if (*p == '-') p++, inst.operands[i].negative = 1;
4660
4661 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4662 {
4663 inst.operands[i].imm = reg;
4664 inst.operands[i].immisreg = 1;
4665
4666 if (skip_past_comma (&p) == SUCCESS)
4667 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4668 return PARSE_OPERAND_FAIL;
4669 }
4670 else if (skip_past_char (&p, ':') == SUCCESS)
4671 {
4672 /* FIXME: '@' should be used here, but it's filtered out by generic
4673 	     code before we get to see it. This may be subject to
4674 change. */
4675 expressionS exp;
4676 my_get_expression (&exp, &p, GE_NO_PREFIX);
4677 if (exp.X_op != O_constant)
4678 {
4679 inst.error = _("alignment must be constant");
4680 return PARSE_OPERAND_FAIL;
4681 }
4682 inst.operands[i].imm = exp.X_add_number << 8;
4683 inst.operands[i].immisalign = 1;
4684 /* Alignments are not pre-indexes. */
4685 inst.operands[i].preind = 0;
4686 }
4687 else
4688 {
4689 if (inst.operands[i].negative)
4690 {
4691 inst.operands[i].negative = 0;
4692 p--;
4693 }
4694
4695 if (group_relocations
4696 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4697 {
4698 struct group_reloc_table_entry *entry;
4699
4700 /* Skip over the #: or : sequence. */
4701 if (*p == '#')
4702 p += 2;
4703 else
4704 p++;
4705
4706 /* Try to parse a group relocation. Anything else is an
4707 error. */
4708 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4709 {
4710 inst.error = _("unknown group relocation");
4711 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4712 }
4713
4714 /* We now have the group relocation table entry corresponding to
4715 the name in the assembler source. Next, we parse the
4716 expression. */
4717 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4718 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4719
4720 /* Record the relocation type. */
4721 switch (group_type)
4722 {
4723 case GROUP_LDR:
4724 inst.reloc.type = entry->ldr_code;
4725 break;
4726
4727 case GROUP_LDRS:
4728 inst.reloc.type = entry->ldrs_code;
4729 break;
4730
4731 case GROUP_LDC:
4732 inst.reloc.type = entry->ldc_code;
4733 break;
4734
4735 default:
4736 assert (0);
4737 }
4738
4739 if (inst.reloc.type == 0)
4740 {
4741 inst.error = _("this group relocation is not allowed on this instruction");
4742 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4743 }
4744 }
4745 else
4746 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4747 return PARSE_OPERAND_FAIL;
4748 }
4749 }
4750
4751 if (skip_past_char (&p, ']') == FAIL)
4752 {
4753 inst.error = _("']' expected");
4754 return PARSE_OPERAND_FAIL;
4755 }
4756
4757 if (skip_past_char (&p, '!') == SUCCESS)
4758 inst.operands[i].writeback = 1;
4759
4760 else if (skip_past_comma (&p) == SUCCESS)
4761 {
4762 if (skip_past_char (&p, '{') == SUCCESS)
4763 {
4764 /* [Rn], {expr} - unindexed, with option */
4765 if (parse_immediate (&p, &inst.operands[i].imm,
4766 0, 255, TRUE) == FAIL)
4767 return PARSE_OPERAND_FAIL;
4768
4769 if (skip_past_char (&p, '}') == FAIL)
4770 {
4771 inst.error = _("'}' expected at end of 'option' field");
4772 return PARSE_OPERAND_FAIL;
4773 }
4774 if (inst.operands[i].preind)
4775 {
4776 inst.error = _("cannot combine index with option");
4777 return PARSE_OPERAND_FAIL;
4778 }
4779 *str = p;
4780 return PARSE_OPERAND_SUCCESS;
4781 }
4782 else
4783 {
4784 inst.operands[i].postind = 1;
4785 inst.operands[i].writeback = 1;
4786
4787 if (inst.operands[i].preind)
4788 {
4789 inst.error = _("cannot combine pre- and post-indexing");
4790 return PARSE_OPERAND_FAIL;
4791 }
4792
4793 if (*p == '+') p++;
4794 else if (*p == '-') p++, inst.operands[i].negative = 1;
4795
4796 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4797 {
4798 /* We might be using the immediate for alignment already. If we
4799 are, OR the register number into the low-order bits. */
4800 if (inst.operands[i].immisalign)
4801 inst.operands[i].imm |= reg;
4802 else
4803 inst.operands[i].imm = reg;
4804 inst.operands[i].immisreg = 1;
4805
4806 if (skip_past_comma (&p) == SUCCESS)
4807 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4808 return PARSE_OPERAND_FAIL;
4809 }
4810 else
4811 {
4812 if (inst.operands[i].negative)
4813 {
4814 inst.operands[i].negative = 0;
4815 p--;
4816 }
4817 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4818 return PARSE_OPERAND_FAIL;
4819 }
4820 }
4821 }
4822
4823 /* If at this point neither .preind nor .postind is set, we have a
4824 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4825 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4826 {
4827 inst.operands[i].preind = 1;
4828 inst.reloc.exp.X_op = O_constant;
4829 inst.reloc.exp.X_add_number = 0;
4830 }
4831 *str = p;
4832 return PARSE_OPERAND_SUCCESS;
4833 }
4834
4835 static int
4836 parse_address (char **str, int i)
4837 {
4838 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4839 ? SUCCESS : FAIL;
4840 }
4841
4842 static parse_operand_result
4843 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4844 {
4845 return parse_address_main (str, i, 1, type);
4846 }
4847
4848 /* Parse an operand for a MOVW or MOVT instruction. */
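     /* For example, "movw r0, #:lower16:sym" and "movt r0, #:upper16:sym"
        use the relocation forms, while "movw r0, #0x1234" takes a plain
        constant in the range 0 .. 0xffff. */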
4849 static int
4850 parse_half (char **str)
4851 {
4852 char * p;
4853
4854 p = *str;
4855 skip_past_char (&p, '#');
4856 if (strncasecmp (p, ":lower16:", 9) == 0)
4857 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4858 else if (strncasecmp (p, ":upper16:", 9) == 0)
4859 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4860
4861 if (inst.reloc.type != BFD_RELOC_UNUSED)
4862 {
4863 p += 9;
4864 skip_whitespace (p);
4865 }
4866
4867 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4868 return FAIL;
4869
4870 if (inst.reloc.type == BFD_RELOC_UNUSED)
4871 {
4872 if (inst.reloc.exp.X_op != O_constant)
4873 {
4874 inst.error = _("constant expression expected");
4875 return FAIL;
4876 }
4877 if (inst.reloc.exp.X_add_number < 0
4878 || inst.reloc.exp.X_add_number > 0xffff)
4879 {
4880 inst.error = _("immediate value out of range");
4881 return FAIL;
4882 }
4883 }
4884 *str = p;
4885 return SUCCESS;
4886 }
4887
4888 /* Miscellaneous. */
4889
4890 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4891 or a bitmask suitable to be or-ed into the ARM msr instruction. */
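     /* For example, "CPSR_c" (as in "msr CPSR_c, r0") selects only the
        control field, "SPSR_cxsf" selects all four fields of the SPSR, and
        a bare "CPSR" is treated as "CPSR_fc". */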
4892 static int
4893 parse_psr (char **str)
4894 {
4895 char *p;
4896 unsigned long psr_field;
4897 const struct asm_psr *psr;
4898 char *start;
4899
4900   /* CPSRs and SPSRs can now be lowercase. This is just a convenience,
4901      for ease of use and backwards compatibility. */
4902 p = *str;
4903 if (strncasecmp (p, "SPSR", 4) == 0)
4904 psr_field = SPSR_BIT;
4905 else if (strncasecmp (p, "CPSR", 4) == 0)
4906 psr_field = 0;
4907 else
4908 {
4909 start = p;
4910 do
4911 p++;
4912 while (ISALNUM (*p) || *p == '_');
4913
4914 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4915 if (!psr)
4916 return FAIL;
4917
4918 *str = p;
4919 return psr->field;
4920 }
4921
4922 p += 4;
4923 if (*p == '_')
4924 {
4925 /* A suffix follows. */
4926 p++;
4927 start = p;
4928
4929 do
4930 p++;
4931 while (ISALNUM (*p) || *p == '_');
4932
4933 psr = hash_find_n (arm_psr_hsh, start, p - start);
4934 if (!psr)
4935 goto error;
4936
4937 psr_field |= psr->field;
4938 }
4939 else
4940 {
4941 if (ISALNUM (*p))
4942 goto error; /* Garbage after "[CS]PSR". */
4943
4944 psr_field |= (PSR_c | PSR_f);
4945 }
4946 *str = p;
4947 return psr_field;
4948
4949 error:
4950 inst.error = _("flag for {c}psr instruction expected");
4951 return FAIL;
4952 }
4953
4954 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4955 value suitable for splatting into the AIF field of the instruction. */
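     /* For example, the flags "if" (as in "cpsie if") yield 0x2 | 0x1 = 3,
        and "aif" yields 7. */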
4956
4957 static int
4958 parse_cps_flags (char **str)
4959 {
4960 int val = 0;
4961 int saw_a_flag = 0;
4962 char *s = *str;
4963
4964 for (;;)
4965 switch (*s++)
4966 {
4967 case '\0': case ',':
4968 goto done;
4969
4970 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4971 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4972 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4973
4974 default:
4975 inst.error = _("unrecognized CPS flag");
4976 return FAIL;
4977 }
4978
4979 done:
4980 if (saw_a_flag == 0)
4981 {
4982 inst.error = _("missing CPS flags");
4983 return FAIL;
4984 }
4985
4986 *str = s - 1;
4987 return val;
4988 }
4989
4990 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4991 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4992
4993 static int
4994 parse_endian_specifier (char **str)
4995 {
4996 int little_endian;
4997 char *s = *str;
4998
4999 if (strncasecmp (s, "BE", 2))
5000 little_endian = 0;
5001 else if (strncasecmp (s, "LE", 2))
5002 little_endian = 1;
5003 else
5004 {
5005 inst.error = _("valid endian specifiers are be or le");
5006 return FAIL;
5007 }
5008
5009 if (ISALNUM (s[2]) || s[2] == '_')
5010 {
5011 inst.error = _("valid endian specifiers are be or le");
5012 return FAIL;
5013 }
5014
5015 *str = s + 2;
5016 return little_endian;
5017 }
5018
5019 /* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a value
5020    suitable for poking into the rotate field of an sxt or sxta
5021    instruction, or FAIL on error. */
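     /* For example, "ror #16" (as in "sxtb r0, r1, ror #16") returns 0x2. */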
5022
5023 static int
5024 parse_ror (char **str)
5025 {
5026 int rot;
5027 char *s = *str;
5028
5029 if (strncasecmp (s, "ROR", 3) == 0)
5030 s += 3;
5031 else
5032 {
5033 inst.error = _("missing rotation field after comma");
5034 return FAIL;
5035 }
5036
5037 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5038 return FAIL;
5039
5040 switch (rot)
5041 {
5042 case 0: *str = s; return 0x0;
5043 case 8: *str = s; return 0x1;
5044 case 16: *str = s; return 0x2;
5045 case 24: *str = s; return 0x3;
5046
5047 default:
5048 inst.error = _("rotation can only be 0, 8, 16, or 24");
5049 return FAIL;
5050 }
5051 }
5052
5053 /* Parse a conditional code (from conds[] below). The value returned is in the
5054 range 0 .. 14, or FAIL. */
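     /* For example, "eq" returns 0x0, "cc" returns 0x3 and "al" returns
        0xe. */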
5055 static int
5056 parse_cond (char **str)
5057 {
5058 char *q;
5059 const struct asm_cond *c;
5060 int n;
5061 /* Condition codes are always 2 characters, so matching up to
5062 3 characters is sufficient. */
5063 char cond[3];
5064
5065 q = *str;
5066 n = 0;
5067 while (ISALPHA (*q) && n < 3)
5068 {
5069 cond[n] = TOLOWER(*q);
5070 q++;
5071 n++;
5072 }
5073
5074 c = hash_find_n (arm_cond_hsh, cond, n);
5075 if (!c)
5076 {
5077 inst.error = _("condition required");
5078 return FAIL;
5079 }
5080
5081 *str = q;
5082 return c->value;
5083 }
5084
5085 /* Parse an option for a barrier instruction. Returns the encoding for the
5086 option, or FAIL. */
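     /* For example, the "sy" (full system) option of "dmb sy" is looked up
        here; its encoding comes from the arm_barrier_opt_hsh table. */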
5087 static int
5088 parse_barrier (char **str)
5089 {
5090 char *p, *q;
5091 const struct asm_barrier_opt *o;
5092
5093 p = q = *str;
5094 while (ISALPHA (*q))
5095 q++;
5096
5097 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5098 if (!o)
5099 return FAIL;
5100
5101 *str = q;
5102 return o->value;
5103 }
5104
5105 /* Parse the operands of a table branch instruction. Similar to a memory
5106 operand. */
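     /* For example, "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]" are accepted;
        any shift other than LSL #1 is rejected. */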
5107 static int
5108 parse_tb (char **str)
5109 {
5110 char * p = *str;
5111 int reg;
5112
5113 if (skip_past_char (&p, '[') == FAIL)
5114 {
5115 inst.error = _("'[' expected");
5116 return FAIL;
5117 }
5118
5119 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5120 {
5121 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5122 return FAIL;
5123 }
5124 inst.operands[0].reg = reg;
5125
5126 if (skip_past_comma (&p) == FAIL)
5127 {
5128 inst.error = _("',' expected");
5129 return FAIL;
5130 }
5131
5132 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5133 {
5134 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5135 return FAIL;
5136 }
5137 inst.operands[0].imm = reg;
5138
5139 if (skip_past_comma (&p) == SUCCESS)
5140 {
5141 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5142 return FAIL;
5143 if (inst.reloc.exp.X_add_number != 1)
5144 {
5145 inst.error = _("invalid shift");
5146 return FAIL;
5147 }
5148 inst.operands[0].shifted = 1;
5149 }
5150
5151 if (skip_past_char (&p, ']') == FAIL)
5152 {
5153 inst.error = _("']' expected");
5154 return FAIL;
5155 }
5156 *str = p;
5157 return SUCCESS;
5158 }
5159
5160 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5161 information on the types the operands can take and how they are encoded.
5162 Up to four operands may be read; this function handles setting the
5163 ".present" field for each read operand itself.
5164 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5165 else returns FAIL. */
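     /* Typical forms include "vmov.32 d0[1], r2" (case 4), "vmov q0, q1"
        (case 0) and "vmov.f32 s0, #0.5" (case 10). */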
5166
5167 static int
5168 parse_neon_mov (char **str, int *which_operand)
5169 {
5170 int i = *which_operand, val;
5171 enum arm_reg_type rtype;
5172 char *ptr = *str;
5173 struct neon_type_el optype;
5174
5175 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5176 {
5177 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5178 inst.operands[i].reg = val;
5179 inst.operands[i].isscalar = 1;
5180 inst.operands[i].vectype = optype;
5181 inst.operands[i++].present = 1;
5182
5183 if (skip_past_comma (&ptr) == FAIL)
5184 goto wanted_comma;
5185
5186 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5187 goto wanted_arm;
5188
5189 inst.operands[i].reg = val;
5190 inst.operands[i].isreg = 1;
5191 inst.operands[i].present = 1;
5192 }
5193 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5194 != FAIL)
5195 {
5196 /* Cases 0, 1, 2, 3, 5 (D only). */
5197 if (skip_past_comma (&ptr) == FAIL)
5198 goto wanted_comma;
5199
5200 inst.operands[i].reg = val;
5201 inst.operands[i].isreg = 1;
5202 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5203 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5204 inst.operands[i].isvec = 1;
5205 inst.operands[i].vectype = optype;
5206 inst.operands[i++].present = 1;
5207
5208 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5209 {
5210 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5211 Case 13: VMOV <Sd>, <Rm> */
5212 inst.operands[i].reg = val;
5213 inst.operands[i].isreg = 1;
5214 inst.operands[i].present = 1;
5215
5216 if (rtype == REG_TYPE_NQ)
5217 {
5218 first_error (_("can't use Neon quad register here"));
5219 return FAIL;
5220 }
5221 else if (rtype != REG_TYPE_VFS)
5222 {
5223 i++;
5224 if (skip_past_comma (&ptr) == FAIL)
5225 goto wanted_comma;
5226 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5227 goto wanted_arm;
5228 inst.operands[i].reg = val;
5229 inst.operands[i].isreg = 1;
5230 inst.operands[i].present = 1;
5231 }
5232 }
5233 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5234 &optype)) != FAIL)
5235 {
5236 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5237 Case 1: VMOV<c><q> <Dd>, <Dm>
5238 Case 8: VMOV.F32 <Sd>, <Sm>
5239 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5240
5241 inst.operands[i].reg = val;
5242 inst.operands[i].isreg = 1;
5243 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5244 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5245 inst.operands[i].isvec = 1;
5246 inst.operands[i].vectype = optype;
5247 inst.operands[i].present = 1;
5248
5249 if (skip_past_comma (&ptr) == SUCCESS)
5250 {
5251 /* Case 15. */
5252 i++;
5253
5254 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5255 goto wanted_arm;
5256
5257 inst.operands[i].reg = val;
5258 inst.operands[i].isreg = 1;
5259 inst.operands[i++].present = 1;
5260
5261 if (skip_past_comma (&ptr) == FAIL)
5262 goto wanted_comma;
5263
5264 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5265 goto wanted_arm;
5266
5267 inst.operands[i].reg = val;
5268 inst.operands[i].isreg = 1;
5269 inst.operands[i++].present = 1;
5270 }
5271 }
5272 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5273 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5274 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5275 Case 10: VMOV.F32 <Sd>, #<imm>
5276 Case 11: VMOV.F64 <Dd>, #<imm> */
5277 inst.operands[i].immisfloat = 1;
5278 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5279 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5280 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5281 ;
5282 else
5283 {
5284 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5285 return FAIL;
5286 }
5287 }
5288 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5289 {
5290 /* Cases 6, 7. */
5291 inst.operands[i].reg = val;
5292 inst.operands[i].isreg = 1;
5293 inst.operands[i++].present = 1;
5294
5295 if (skip_past_comma (&ptr) == FAIL)
5296 goto wanted_comma;
5297
5298 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5299 {
5300 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5301 inst.operands[i].reg = val;
5302 inst.operands[i].isscalar = 1;
5303 inst.operands[i].present = 1;
5304 inst.operands[i].vectype = optype;
5305 }
5306 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5307 {
5308 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5309 inst.operands[i].reg = val;
5310 inst.operands[i].isreg = 1;
5311 inst.operands[i++].present = 1;
5312
5313 if (skip_past_comma (&ptr) == FAIL)
5314 goto wanted_comma;
5315
5316 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5317 == FAIL)
5318 {
5319 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5320 return FAIL;
5321 }
5322
5323 inst.operands[i].reg = val;
5324 inst.operands[i].isreg = 1;
5325 inst.operands[i].isvec = 1;
5326 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5327 inst.operands[i].vectype = optype;
5328 inst.operands[i].present = 1;
5329
5330 if (rtype == REG_TYPE_VFS)
5331 {
5332 /* Case 14. */
5333 i++;
5334 if (skip_past_comma (&ptr) == FAIL)
5335 goto wanted_comma;
5336 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5337 &optype)) == FAIL)
5338 {
5339 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5340 return FAIL;
5341 }
5342 inst.operands[i].reg = val;
5343 inst.operands[i].isreg = 1;
5344 inst.operands[i].isvec = 1;
5345 inst.operands[i].issingle = 1;
5346 inst.operands[i].vectype = optype;
5347 inst.operands[i].present = 1;
5348 }
5349 }
5350 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5351 != FAIL)
5352 {
5353 /* Case 13. */
5354 inst.operands[i].reg = val;
5355 inst.operands[i].isreg = 1;
5356 inst.operands[i].isvec = 1;
5357 inst.operands[i].issingle = 1;
5358 inst.operands[i].vectype = optype;
5359 inst.operands[i++].present = 1;
5360 }
5361 }
5362 else
5363 {
5364 first_error (_("parse error"));
5365 return FAIL;
5366 }
5367
5368 /* Successfully parsed the operands. Update args. */
5369 *which_operand = i;
5370 *str = ptr;
5371 return SUCCESS;
5372
5373 wanted_comma:
5374 first_error (_("expected comma"));
5375 return FAIL;
5376
5377 wanted_arm:
5378 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5379 return FAIL;
5380 }
5381
5382 /* Matcher codes for parse_operands. */
5383 enum operand_parse_code
5384 {
5385 OP_stop, /* end of line */
5386
5387 OP_RR, /* ARM register */
5388 OP_RRnpc, /* ARM register, not r15 */
5389 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5390 OP_RRw, /* ARM register, not r15, optional trailing ! */
5391 OP_RCP, /* Coprocessor number */
5392 OP_RCN, /* Coprocessor register */
5393 OP_RF, /* FPA register */
5394 OP_RVS, /* VFP single precision register */
5395 OP_RVD, /* VFP double precision register (0..15) */
5396 OP_RND, /* Neon double precision register (0..31) */
5397 OP_RNQ, /* Neon quad precision register */
5398 OP_RVSD, /* VFP single or double precision register */
5399 OP_RNDQ, /* Neon double or quad precision register */
5400 OP_RNSDQ, /* Neon single, double or quad precision register */
5401 OP_RNSC, /* Neon scalar D[X] */
5402 OP_RVC, /* VFP control register */
5403 OP_RMF, /* Maverick F register */
5404 OP_RMD, /* Maverick D register */
5405 OP_RMFX, /* Maverick FX register */
5406 OP_RMDX, /* Maverick DX register */
5407 OP_RMAX, /* Maverick AX register */
5408 OP_RMDS, /* Maverick DSPSC register */
5409 OP_RIWR, /* iWMMXt wR register */
5410 OP_RIWC, /* iWMMXt wC register */
5411 OP_RIWG, /* iWMMXt wCG register */
5412 OP_RXA, /* XScale accumulator register */
5413
5414 OP_REGLST, /* ARM register list */
5415 OP_VRSLST, /* VFP single-precision register list */
5416 OP_VRDLST, /* VFP double-precision register list */
5417 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5418 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5419 OP_NSTRLST, /* Neon element/structure list */
5420
5421 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5422 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5423 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5424 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5425 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5426 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5427 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5428 OP_VMOV, /* Neon VMOV operands. */
5429 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5430 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5431 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5432
5433 OP_I0, /* immediate zero */
5434 OP_I7, /* immediate value 0 .. 7 */
5435 OP_I15, /* 0 .. 15 */
5436 OP_I16, /* 1 .. 16 */
5437 OP_I16z, /* 0 .. 16 */
5438 OP_I31, /* 0 .. 31 */
5439 OP_I31w, /* 0 .. 31, optional trailing ! */
5440 OP_I32, /* 1 .. 32 */
5441 OP_I32z, /* 0 .. 32 */
5442 OP_I63, /* 0 .. 63 */
5443 OP_I63s, /* -64 .. 63 */
5444 OP_I64, /* 1 .. 64 */
5445 OP_I64z, /* 0 .. 64 */
5446 OP_I255, /* 0 .. 255 */
5447
5448 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5449 OP_I7b, /* 0 .. 7 */
5450 OP_I15b, /* 0 .. 15 */
5451 OP_I31b, /* 0 .. 31 */
5452
5453 OP_SH, /* shifter operand */
5454 OP_SHG, /* shifter operand with possible group relocation */
5455 OP_ADDR, /* Memory address expression (any mode) */
5456 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5457 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5458 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5459 OP_EXP, /* arbitrary expression */
5460 OP_EXPi, /* same, with optional immediate prefix */
5461 OP_EXPr, /* same, with optional relocation suffix */
5462 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5463
5464 OP_CPSF, /* CPS flags */
5465 OP_ENDI, /* Endianness specifier */
5466 OP_PSR, /* CPSR/SPSR mask for msr */
5467 OP_COND, /* conditional code */
5468 OP_TB, /* Table branch. */
5469
5470 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5471 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5472
5473 OP_RRnpc_I0, /* ARM register or literal 0 */
5474 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5475 OP_RR_EXi, /* ARM register or expression with imm prefix */
5476 OP_RF_IF, /* FPA register or immediate */
5477 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5478 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5479
5480 /* Optional operands. */
5481 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5482 OP_oI31b, /* 0 .. 31 */
5483 OP_oI32b, /* 1 .. 32 */
5484 OP_oIffffb, /* 0 .. 65535 */
5485 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5486
5487 OP_oRR, /* ARM register */
5488 OP_oRRnpc, /* ARM register, not the PC */
5489 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5490 OP_oRND, /* Optional Neon double precision register */
5491 OP_oRNQ, /* Optional Neon quad precision register */
5492 OP_oRNDQ, /* Optional Neon double or quad precision register */
5493 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5494 OP_oSHll, /* LSL immediate */
5495 OP_oSHar, /* ASR immediate */
5496 OP_oSHllar, /* LSL or ASR immediate */
5497 OP_oROR, /* ROR 0/8/16/24 */
5498 OP_oBARRIER, /* Option argument for a barrier instruction. */
5499
5500 OP_FIRST_OPTIONAL = OP_oI7b
5501 };
5502
5503 /* Generic instruction operand parser. This does no encoding and no
5504 semantic validation; it merely squirrels values away in the inst
5505 structure. Returns SUCCESS or FAIL depending on whether the
5506 specified grammar matched. */
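     /* The pattern is the operand list taken from the opcode table; a
        three-operand data-processing entry, for instance, uses something
        like { OP_RR, OP_oRR, OP_SH }: a destination register, an optional
        first source register and a shifter operand, with the remaining
        slots left as OP_stop. */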
5507 static int
5508 parse_operands (char *str, const unsigned char *pattern)
5509 {
5510 unsigned const char *upat = pattern;
5511 char *backtrack_pos = 0;
5512 const char *backtrack_error = 0;
5513 int i, val, backtrack_index = 0;
5514 enum arm_reg_type rtype;
5515 parse_operand_result result;
5516
5517 #define po_char_or_fail(chr) do { \
5518 if (skip_past_char (&str, chr) == FAIL) \
5519 goto bad_args; \
5520 } while (0)
5521
5522 #define po_reg_or_fail(regtype) do { \
5523 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5524 &inst.operands[i].vectype); \
5525 if (val == FAIL) \
5526 { \
5527 first_error (_(reg_expected_msgs[regtype])); \
5528 goto failure; \
5529 } \
5530 inst.operands[i].reg = val; \
5531 inst.operands[i].isreg = 1; \
5532 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5533 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5534 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5535 || rtype == REG_TYPE_VFD \
5536 || rtype == REG_TYPE_NQ); \
5537 } while (0)
5538
5539 #define po_reg_or_goto(regtype, label) do { \
5540 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5541 &inst.operands[i].vectype); \
5542 if (val == FAIL) \
5543 goto label; \
5544 \
5545 inst.operands[i].reg = val; \
5546 inst.operands[i].isreg = 1; \
5547 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5548 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5549 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5550 || rtype == REG_TYPE_VFD \
5551 || rtype == REG_TYPE_NQ); \
5552 } while (0)
5553
5554 #define po_imm_or_fail(min, max, popt) do { \
5555 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5556 goto failure; \
5557 inst.operands[i].imm = val; \
5558 } while (0)
5559
5560 #define po_scalar_or_goto(elsz, label) do { \
5561 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5562 if (val == FAIL) \
5563 goto label; \
5564 inst.operands[i].reg = val; \
5565 inst.operands[i].isscalar = 1; \
5566 } while (0)
5567
5568 #define po_misc_or_fail(expr) do { \
5569 if (expr) \
5570 goto failure; \
5571 } while (0)
5572
5573 #define po_misc_or_fail_no_backtrack(expr) do { \
5574 result = expr; \
5575 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5576 backtrack_pos = 0; \
5577 if (result != PARSE_OPERAND_SUCCESS) \
5578 goto failure; \
5579 } while (0)
5580
5581 skip_whitespace (str);
5582
5583 for (i = 0; upat[i] != OP_stop; i++)
5584 {
5585 if (upat[i] >= OP_FIRST_OPTIONAL)
5586 {
5587 /* Remember where we are in case we need to backtrack. */
5588 assert (!backtrack_pos);
5589 backtrack_pos = str;
5590 backtrack_error = inst.error;
5591 backtrack_index = i;
5592 }
5593
5594 if (i > 0 && (i > 1 || inst.operands[0].present))
5595 po_char_or_fail (',');
5596
5597 switch (upat[i])
5598 {
5599 /* Registers */
5600 case OP_oRRnpc:
5601 case OP_RRnpc:
5602 case OP_oRR:
5603 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5604 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5605 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5606 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5607 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5608 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5609 case OP_oRND:
5610 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5611 case OP_RVC:
5612 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
5613 break;
5614 /* Also accept generic coprocessor regs for unknown registers. */
5615 coproc_reg:
5616 po_reg_or_fail (REG_TYPE_CN);
5617 break;
5618 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5619 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5620 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5621 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5622 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5623 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5624 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5625 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5626 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5627 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5628 case OP_oRNQ:
5629 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5630 case OP_oRNDQ:
5631 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5632 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5633 case OP_oRNSDQ:
5634 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5635
5636 /* Neon scalar. Using an element size of 8 means that some invalid
5637 scalars are accepted here, so deal with those in later code. */
5638 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5639
5640 /* WARNING: We can expand to two operands here. This has the potential
5641 to totally confuse the backtracking mechanism! It will be OK at
5642 least as long as we don't try to use optional args as well,
5643 though. */
5644 case OP_NILO:
5645 {
5646 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5647 inst.operands[i].present = 1;
5648 i++;
5649 skip_past_comma (&str);
5650 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5651 break;
5652 one_reg_only:
5653 /* Optional register operand was omitted. Unfortunately, it's in
5654 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5655 here (this is a bit grotty). */
5656 inst.operands[i] = inst.operands[i-1];
5657 inst.operands[i-1].present = 0;
5658 break;
5659 try_imm:
5660 /* There's a possibility of getting a 64-bit immediate here, so
5661 we need special handling. */
5662 if (parse_big_immediate (&str, i) == FAIL)
5663 {
5664 inst.error = _("immediate value is out of range");
5665 goto failure;
5666 }
5667 }
5668 break;
5669
5670 case OP_RNDQ_I0:
5671 {
5672 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5673 break;
5674 try_imm0:
5675 po_imm_or_fail (0, 0, TRUE);
5676 }
5677 break;
5678
5679 case OP_RVSD_I0:
5680 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5681 break;
5682
5683 case OP_RR_RNSC:
5684 {
5685 po_scalar_or_goto (8, try_rr);
5686 break;
5687 try_rr:
5688 po_reg_or_fail (REG_TYPE_RN);
5689 }
5690 break;
5691
5692 case OP_RNSDQ_RNSC:
5693 {
5694 po_scalar_or_goto (8, try_nsdq);
5695 break;
5696 try_nsdq:
5697 po_reg_or_fail (REG_TYPE_NSDQ);
5698 }
5699 break;
5700
5701 case OP_RNDQ_RNSC:
5702 {
5703 po_scalar_or_goto (8, try_ndq);
5704 break;
5705 try_ndq:
5706 po_reg_or_fail (REG_TYPE_NDQ);
5707 }
5708 break;
5709
5710 case OP_RND_RNSC:
5711 {
5712 po_scalar_or_goto (8, try_vfd);
5713 break;
5714 try_vfd:
5715 po_reg_or_fail (REG_TYPE_VFD);
5716 }
5717 break;
5718
5719 case OP_VMOV:
5720 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5721 not careful then bad things might happen. */
5722 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5723 break;
5724
5725 case OP_RNDQ_IMVNb:
5726 {
5727 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5728 break;
5729 try_mvnimm:
5730 /* There's a possibility of getting a 64-bit immediate here, so
5731 we need special handling. */
5732 if (parse_big_immediate (&str, i) == FAIL)
5733 {
5734 inst.error = _("immediate value is out of range");
5735 goto failure;
5736 }
5737 }
5738 break;
5739
5740 case OP_RNDQ_I63b:
5741 {
5742 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5743 break;
5744 try_shimm:
5745 po_imm_or_fail (0, 63, TRUE);
5746 }
5747 break;
5748
5749 case OP_RRnpcb:
5750 po_char_or_fail ('[');
5751 po_reg_or_fail (REG_TYPE_RN);
5752 po_char_or_fail (']');
5753 break;
5754
5755 case OP_RRw:
5756 case OP_oRRw:
5757 po_reg_or_fail (REG_TYPE_RN);
5758 if (skip_past_char (&str, '!') == SUCCESS)
5759 inst.operands[i].writeback = 1;
5760 break;
5761
5762 /* Immediates */
5763 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5764 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5765 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5766 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5767 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5768 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5769 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5770 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5771 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5772 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5773 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5774 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5775
5776 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5777 case OP_oI7b:
5778 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5779 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5780 case OP_oI31b:
5781 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5782 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5783 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5784
5785 /* Immediate variants */
5786 case OP_oI255c:
5787 po_char_or_fail ('{');
5788 po_imm_or_fail (0, 255, TRUE);
5789 po_char_or_fail ('}');
5790 break;
5791
5792 case OP_I31w:
5793 /* The expression parser chokes on a trailing !, so we have
5794 to find it first and zap it. */
5795 {
5796 char *s = str;
5797 while (*s && *s != ',')
5798 s++;
5799 if (s[-1] == '!')
5800 {
5801 s[-1] = '\0';
5802 inst.operands[i].writeback = 1;
5803 }
5804 po_imm_or_fail (0, 31, TRUE);
5805 if (str == s - 1)
5806 str = s;
5807 }
5808 break;
5809
5810 /* Expressions */
5811 case OP_EXPi: EXPi:
5812 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5813 GE_OPT_PREFIX));
5814 break;
5815
5816 case OP_EXP:
5817 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5818 GE_NO_PREFIX));
5819 break;
5820
5821 case OP_EXPr: EXPr:
5822 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5823 GE_NO_PREFIX));
5824 if (inst.reloc.exp.X_op == O_symbol)
5825 {
5826 val = parse_reloc (&str);
5827 if (val == -1)
5828 {
5829 inst.error = _("unrecognized relocation suffix");
5830 goto failure;
5831 }
5832 else if (val != BFD_RELOC_UNUSED)
5833 {
5834 inst.operands[i].imm = val;
5835 inst.operands[i].hasreloc = 1;
5836 }
5837 }
5838 break;
5839
5840 /* Operand for MOVW or MOVT. */
5841 case OP_HALF:
5842 po_misc_or_fail (parse_half (&str));
5843 break;
5844
5845 /* Register or expression */
5846 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5847 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5848
5849 /* Register or immediate */
5850 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5851 I0: po_imm_or_fail (0, 0, FALSE); break;
5852
5853 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5854 IF:
5855 if (!is_immediate_prefix (*str))
5856 goto bad_args;
5857 str++;
5858 val = parse_fpa_immediate (&str);
5859 if (val == FAIL)
5860 goto failure;
5861 /* FPA immediates are encoded as registers 8-15.
5862 parse_fpa_immediate has already applied the offset. */
5863 inst.operands[i].reg = val;
5864 inst.operands[i].isreg = 1;
5865 break;
5866
5867 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5868 I32z: po_imm_or_fail (0, 32, FALSE); break;
5869
5870 /* Two kinds of register */
5871 case OP_RIWR_RIWC:
5872 {
5873 struct reg_entry *rege = arm_reg_parse_multi (&str);
5874 if (!rege
5875 || (rege->type != REG_TYPE_MMXWR
5876 && rege->type != REG_TYPE_MMXWC
5877 && rege->type != REG_TYPE_MMXWCG))
5878 {
5879 inst.error = _("iWMMXt data or control register expected");
5880 goto failure;
5881 }
5882 inst.operands[i].reg = rege->number;
5883 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5884 }
5885 break;
5886
5887 case OP_RIWC_RIWG:
5888 {
5889 struct reg_entry *rege = arm_reg_parse_multi (&str);
5890 if (!rege
5891 || (rege->type != REG_TYPE_MMXWC
5892 && rege->type != REG_TYPE_MMXWCG))
5893 {
5894 inst.error = _("iWMMXt control register expected");
5895 goto failure;
5896 }
5897 inst.operands[i].reg = rege->number;
5898 inst.operands[i].isreg = 1;
5899 }
5900 break;
5901
5902 /* Misc */
5903 case OP_CPSF: val = parse_cps_flags (&str); break;
5904 case OP_ENDI: val = parse_endian_specifier (&str); break;
5905 case OP_oROR: val = parse_ror (&str); break;
5906 case OP_PSR: val = parse_psr (&str); break;
5907 case OP_COND: val = parse_cond (&str); break;
5908 case OP_oBARRIER:val = parse_barrier (&str); break;
5909
5910 case OP_RVC_PSR:
5911 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5912 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5913 break;
5914 try_psr:
5915 val = parse_psr (&str);
5916 break;
5917
5918 case OP_APSR_RR:
5919 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5920 break;
5921 try_apsr:
5922 	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
5923 instruction). */
5924 if (strncasecmp (str, "APSR_", 5) == 0)
5925 {
5926 unsigned found = 0;
5927 str += 5;
5928 while (found < 15)
5929 switch (*str++)
5930 {
5931 case 'c': found = (found & 1) ? 16 : found | 1; break;
5932 case 'n': found = (found & 2) ? 16 : found | 2; break;
5933 case 'z': found = (found & 4) ? 16 : found | 4; break;
5934 case 'v': found = (found & 8) ? 16 : found | 8; break;
5935 default: found = 16;
5936 }
5937 if (found != 15)
5938 goto failure;
5939 inst.operands[i].isvec = 1;
5940 }
5941 else
5942 goto failure;
5943 break;
5944
5945 case OP_TB:
5946 po_misc_or_fail (parse_tb (&str));
5947 break;
5948
5949 /* Register lists */
5950 case OP_REGLST:
5951 val = parse_reg_list (&str);
5952 if (*str == '^')
5953 {
5954 inst.operands[1].writeback = 1;
5955 str++;
5956 }
5957 break;
5958
5959 case OP_VRSLST:
5960 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5961 break;
5962
5963 case OP_VRDLST:
5964 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5965 break;
5966
5967 case OP_VRSDLST:
5968 /* Allow Q registers too. */
5969 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5970 REGLIST_NEON_D);
5971 if (val == FAIL)
5972 {
5973 inst.error = NULL;
5974 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5975 REGLIST_VFP_S);
5976 inst.operands[i].issingle = 1;
5977 }
5978 break;
5979
5980 case OP_NRDLST:
5981 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5982 REGLIST_NEON_D);
5983 break;
5984
5985 case OP_NSTRLST:
5986 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5987 &inst.operands[i].vectype);
5988 break;
5989
5990 /* Addressing modes */
5991 case OP_ADDR:
5992 po_misc_or_fail (parse_address (&str, i));
5993 break;
5994
5995 case OP_ADDRGLDR:
5996 po_misc_or_fail_no_backtrack (
5997 parse_address_group_reloc (&str, i, GROUP_LDR));
5998 break;
5999
6000 case OP_ADDRGLDRS:
6001 po_misc_or_fail_no_backtrack (
6002 parse_address_group_reloc (&str, i, GROUP_LDRS));
6003 break;
6004
6005 case OP_ADDRGLDC:
6006 po_misc_or_fail_no_backtrack (
6007 parse_address_group_reloc (&str, i, GROUP_LDC));
6008 break;
6009
6010 case OP_SH:
6011 po_misc_or_fail (parse_shifter_operand (&str, i));
6012 break;
6013
6014 case OP_SHG:
6015 po_misc_or_fail_no_backtrack (
6016 parse_shifter_operand_group_reloc (&str, i));
6017 break;
6018
6019 case OP_oSHll:
6020 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6021 break;
6022
6023 case OP_oSHar:
6024 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6025 break;
6026
6027 case OP_oSHllar:
6028 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6029 break;
6030
6031 default:
6032 as_fatal (_("unhandled operand code %d"), upat[i]);
6033 }
6034
6035 /* Various value-based sanity checks and shared operations. We
6036 do not signal immediate failures for the register constraints;
6037 this allows a syntax error to take precedence. */
6038 switch (upat[i])
6039 {
6040 case OP_oRRnpc:
6041 case OP_RRnpc:
6042 case OP_RRnpcb:
6043 case OP_RRw:
6044 case OP_oRRw:
6045 case OP_RRnpc_I0:
6046 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6047 inst.error = BAD_PC;
6048 break;
6049
6050 case OP_CPSF:
6051 case OP_ENDI:
6052 case OP_oROR:
6053 case OP_PSR:
6054 case OP_RVC_PSR:
6055 case OP_COND:
6056 case OP_oBARRIER:
6057 case OP_REGLST:
6058 case OP_VRSLST:
6059 case OP_VRDLST:
6060 case OP_VRSDLST:
6061 case OP_NRDLST:
6062 case OP_NSTRLST:
6063 if (val == FAIL)
6064 goto failure;
6065 inst.operands[i].imm = val;
6066 break;
6067
6068 default:
6069 break;
6070 }
6071
6072 /* If we get here, this operand was successfully parsed. */
6073 inst.operands[i].present = 1;
6074 continue;
6075
6076 bad_args:
6077 inst.error = BAD_ARGS;
6078
6079 failure:
6080 if (!backtrack_pos)
6081 {
6082 /* The parse routine should already have set inst.error, but set a
6083 default here just in case. */
6084 if (!inst.error)
6085 inst.error = _("syntax error");
6086 return FAIL;
6087 }
6088
6089 /* Do not backtrack over a trailing optional argument that
6090 absorbed some text. We will only fail again, with the
6091 'garbage following instruction' error message, which is
6092 probably less helpful than the current one. */
6093 if (backtrack_index == i && backtrack_pos != str
6094 && upat[i+1] == OP_stop)
6095 {
6096 if (!inst.error)
6097 inst.error = _("syntax error");
6098 return FAIL;
6099 }
6100
6101 /* Try again, skipping the optional argument at backtrack_pos. */
6102 str = backtrack_pos;
6103 inst.error = backtrack_error;
6104 inst.operands[backtrack_index].present = 0;
6105 i = backtrack_index;
6106 backtrack_pos = 0;
6107 }
6108
6109 /* Check that we have parsed all the arguments. */
6110 if (*str != '\0' && !inst.error)
6111 inst.error = _("garbage following instruction");
6112
6113 return inst.error ? FAIL : SUCCESS;
6114 }
6115
6116 #undef po_char_or_fail
6117 #undef po_reg_or_fail
6118 #undef po_reg_or_goto
6119 #undef po_imm_or_fail
6120 #undef po_scalar_or_goto
6121 \f
6122 /* Shorthand macro for instruction encoding functions issuing errors. */
6123 #define constraint(expr, err) do { \
6124 if (expr) \
6125 { \
6126 inst.error = err; \
6127 return; \
6128 } \
6129 } while (0)
6130
6131 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6132 instructions are unpredictable if these registers are used. This
6133 is the BadReg predicate in ARM's Thumb-2 documentation. */
6134 #define reject_bad_reg(reg) \
6135 do \
6136 if (reg == REG_SP || reg == REG_PC) \
6137 { \
6138 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6139 return; \
6140 } \
6141 while (0)
6142
6143 /* Functions for operand encoding. ARM, then Thumb. */
6144
6145 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
6146
6147 /* If VAL can be encoded in the immediate field of an ARM instruction,
6148 return the encoded form. Otherwise, return FAIL. */
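     /* For example, 0xff000000 rotates left by 8 to give 0xff, so it is
        encoded as 0xff | (8 << 7) = 0x4ff, i.e. imm8 = 0xff with a rotate
        field of 4 (a rotate right by 8). */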
6149
6150 static unsigned int
6151 encode_arm_immediate (unsigned int val)
6152 {
6153 unsigned int a, i;
6154
6155 for (i = 0; i < 32; i += 2)
6156 if ((a = rotate_left (val, i)) <= 0xff)
6157 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6158
6159 return FAIL;
6160 }
6161
6162 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6163 return the encoded form. Otherwise, return FAIL. */
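     /* For example, 0x00ab00ab encodes as 0x1ab, 0xab00ab00 as 0x2ab and
        0xabababab as 0x3ab (the byte-replication forms). */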
6164 static unsigned int
6165 encode_thumb32_immediate (unsigned int val)
6166 {
6167 unsigned int a, i;
6168
6169 if (val <= 0xff)
6170 return val;
6171
6172 for (i = 1; i <= 24; i++)
6173 {
6174 a = val >> i;
6175 if ((val & ~(0xff << i)) == 0)
6176 	return (a & 0x7f) | ((32 - i) << 7);
6177 }
6178
6179 a = val & 0xff;
6180 if (val == ((a << 16) | a))
6181 return 0x100 | a;
6182 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6183 return 0x300 | a;
6184
6185 a = val & 0xff00;
6186 if (val == ((a << 16) | a))
6187 return 0x200 | (a >> 8);
6188
6189 return FAIL;
6190 }
6191 /* Encode a VFP SP or DP register number into inst.instruction. */
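     /* A single-precision register number is split across two fields: for
        example, s13 in the Sd position is encoded as 6 (13 >> 1) in bits
        12-15 and 1 (13 & 1) in bit 22. Double-precision registers above d15
        need a fifth bit, which is only valid with fpu_vfp_ext_d32. */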
6192
6193 static void
6194 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6195 {
6196 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6197 && reg > 15)
6198 {
6199 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6200 {
6201 if (thumb_mode)
6202 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6203 fpu_vfp_ext_d32);
6204 else
6205 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6206 fpu_vfp_ext_d32);
6207 }
6208 else
6209 {
6210 first_error (_("D register out of range for selected VFP version"));
6211 return;
6212 }
6213 }
6214
6215 switch (pos)
6216 {
6217 case VFP_REG_Sd:
6218 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6219 break;
6220
6221 case VFP_REG_Sn:
6222 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6223 break;
6224
6225 case VFP_REG_Sm:
6226 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6227 break;
6228
6229 case VFP_REG_Dd:
6230 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6231 break;
6232
6233 case VFP_REG_Dn:
6234 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6235 break;
6236
6237 case VFP_REG_Dm:
6238 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6239 break;
6240
6241 default:
6242 abort ();
6243 }
6244 }
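
/* To illustrate the split register fields above: D17 in the Dd position
   places (17 & 15) = 1 in bits 15:12 and (17 >> 4) = 1 in bit 22, while
   S3 in the Sd position places (3 >> 1) = 1 in bits 15:12 and (3 & 1) = 1
   in bit 22. Only D16-D31 trip the fpu_vfp_ext_d32 feature check. */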
6245
6246 /* Encode a <shift> in an ARM-format instruction. The immediate,
6247 if any, is handled by md_apply_fix. */
6248 static void
6249 encode_arm_shift (int i)
6250 {
6251 if (inst.operands[i].shift_kind == SHIFT_RRX)
6252 inst.instruction |= SHIFT_ROR << 5;
6253 else
6254 {
6255 inst.instruction |= inst.operands[i].shift_kind << 5;
6256 if (inst.operands[i].immisreg)
6257 {
6258 inst.instruction |= SHIFT_BY_REG;
6259 inst.instruction |= inst.operands[i].imm << 8;
6260 }
6261 else
6262 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6263 }
6264 }
6265
6266 static void
6267 encode_arm_shifter_operand (int i)
6268 {
6269 if (inst.operands[i].isreg)
6270 {
6271 inst.instruction |= inst.operands[i].reg;
6272 encode_arm_shift (i);
6273 }
6274 else
6275 inst.instruction |= INST_IMMEDIATE;
6276 }
6277
6278 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6279 static void
6280 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6281 {
6282 assert (inst.operands[i].isreg);
6283 inst.instruction |= inst.operands[i].reg << 16;
6284
6285 if (inst.operands[i].preind)
6286 {
6287 if (is_t)
6288 {
6289 inst.error = _("instruction does not accept preindexed addressing");
6290 return;
6291 }
6292 inst.instruction |= PRE_INDEX;
6293 if (inst.operands[i].writeback)
6294 inst.instruction |= WRITE_BACK;
6295
6296 }
6297 else if (inst.operands[i].postind)
6298 {
6299 assert (inst.operands[i].writeback);
6300 if (is_t)
6301 inst.instruction |= WRITE_BACK;
6302 }
6303 else /* unindexed - only for coprocessor */
6304 {
6305 inst.error = _("instruction does not accept unindexed addressing");
6306 return;
6307 }
6308
6309 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6310 && (((inst.instruction & 0x000f0000) >> 16)
6311 == ((inst.instruction & 0x0000f000) >> 12)))
6312 as_warn ((inst.instruction & LOAD_BIT)
6313 ? _("destination register same as write-back base")
6314 : _("source register same as write-back base"));
6315 }
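
/* The warning above inspects the partially built opcode: the base register
   has just been merged into bits 19:16, while the transfer register was
   already placed in bits 15:12 by the caller, so equal fields mean the
   write-back base overlaps the register being loaded or stored. */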
6316
6317 /* inst.operands[i] was set up by parse_address. Encode it into an
6318 ARM-format mode 2 load or store instruction. If is_t is true,
6319 reject forms that cannot be used with a T instruction (i.e. not
6320 post-indexed). */
6321 static void
6322 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6323 {
6324 encode_arm_addr_mode_common (i, is_t);
6325
6326 if (inst.operands[i].immisreg)
6327 {
6328 inst.instruction |= INST_IMMEDIATE; /* Yes, this is backwards. */
6329 inst.instruction |= inst.operands[i].imm;
6330 if (!inst.operands[i].negative)
6331 inst.instruction |= INDEX_UP;
6332 if (inst.operands[i].shifted)
6333 {
6334 if (inst.operands[i].shift_kind == SHIFT_RRX)
6335 inst.instruction |= SHIFT_ROR << 5;
6336 else
6337 {
6338 inst.instruction |= inst.operands[i].shift_kind << 5;
6339 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6340 }
6341 }
6342 }
6343 else /* immediate offset in inst.reloc */
6344 {
6345 if (inst.reloc.type == BFD_RELOC_UNUSED)
6346 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6347 }
6348 }
6349
6350 /* inst.operands[i] was set up by parse_address. Encode it into an
6351 ARM-format mode 3 load or store instruction. Reject forms that
6352 cannot be used with such instructions. If is_t is true, reject
6353 forms that cannot be used with a T instruction (i.e. not
6354 post-indexed). */
6355 static void
6356 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6357 {
6358 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6359 {
6360 inst.error = _("instruction does not accept scaled register index");
6361 return;
6362 }
6363
6364 encode_arm_addr_mode_common (i, is_t);
6365
6366 if (inst.operands[i].immisreg)
6367 {
6368 inst.instruction |= inst.operands[i].imm;
6369 if (!inst.operands[i].negative)
6370 inst.instruction |= INDEX_UP;
6371 }
6372 else /* immediate offset in inst.reloc */
6373 {
6374 inst.instruction |= HWOFFSET_IMM;
6375 if (inst.reloc.type == BFD_RELOC_UNUSED)
6376 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6377 }
6378 }
6379
6380 /* inst.operands[i] was set up by parse_address. Encode it into an
6381 ARM-format instruction. Reject all forms which cannot be encoded
6382 into a coprocessor load/store instruction. If wb_ok is false,
6383 reject use of writeback; if unind_ok is false, reject use of
6384 unindexed addressing. If reloc_override is not 0, use it instead
6385 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6386 (in which case it is preserved). */
6387
6388 static int
6389 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6390 {
6391 inst.instruction |= inst.operands[i].reg << 16;
6392
6393 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6394
6395 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6396 {
6397 assert (!inst.operands[i].writeback);
6398 if (!unind_ok)
6399 {
6400 inst.error = _("instruction does not support unindexed addressing");
6401 return FAIL;
6402 }
6403 inst.instruction |= inst.operands[i].imm;
6404 inst.instruction |= INDEX_UP;
6405 return SUCCESS;
6406 }
6407
6408 if (inst.operands[i].preind)
6409 inst.instruction |= PRE_INDEX;
6410
6411 if (inst.operands[i].writeback)
6412 {
6413 if (inst.operands[i].reg == REG_PC)
6414 {
6415 inst.error = _("pc may not be used with write-back");
6416 return FAIL;
6417 }
6418 if (!wb_ok)
6419 {
6420 inst.error = _("instruction does not support writeback");
6421 return FAIL;
6422 }
6423 inst.instruction |= WRITE_BACK;
6424 }
6425
6426 if (reloc_override)
6427 inst.reloc.type = reloc_override;
6428 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6429 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6430 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6431 {
6432 if (thumb_mode)
6433 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6434 else
6435 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6436 }
6437
6438 return SUCCESS;
6439 }
6440
6441 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6442 Determine whether it can be performed with a move instruction; if
6443 it can, convert inst.instruction to that move instruction and
6444 return 1; if it can't, convert inst.instruction to a literal-pool
6445 load and return 0. If this is not a valid thing to do in the
6446 current context, set inst.error and return 1.
6447
6448 inst.operands[i] describes the destination register. */
6449
6450 static int
6451 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6452 {
6453 unsigned long tbit;
6454
6455 if (thumb_p)
6456 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6457 else
6458 tbit = LOAD_BIT;
6459
6460 if ((inst.instruction & tbit) == 0)
6461 {
6462 inst.error = _("invalid pseudo operation");
6463 return 1;
6464 }
6465 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6466 {
6467 inst.error = _("constant expression expected");
6468 return 1;
6469 }
6470 if (inst.reloc.exp.X_op == O_constant)
6471 {
6472 if (thumb_p)
6473 {
6474 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6475 {
6476 /* This can be done with a mov(1) instruction. */
6477 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6478 inst.instruction |= inst.reloc.exp.X_add_number;
6479 return 1;
6480 }
6481 }
6482 else
6483 {
6484 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6485 if (value != FAIL)
6486 {
6487 /* This can be done with a mov instruction. */
6488 inst.instruction &= LITERAL_MASK;
6489 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6490 inst.instruction |= value & 0xfff;
6491 return 1;
6492 }
6493
6494 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6495 if (value != FAIL)
6496 {
6497 /* This can be done with a mvn instruction. */
6498 inst.instruction &= LITERAL_MASK;
6499 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6500 inst.instruction |= value & 0xfff;
6501 return 1;
6502 }
6503 }
6504 }
6505
6506 if (add_to_lit_pool () == FAIL)
6507 {
6508 inst.error = _("literal pool insertion failed");
6509 return 1;
6510 }
6511 inst.operands[1].reg = REG_PC;
6512 inst.operands[1].isreg = 1;
6513 inst.operands[1].preind = 1;
6514 inst.reloc.pc_rel = 1;
6515 inst.reloc.type = (thumb_p
6516 ? BFD_RELOC_ARM_THUMB_OFFSET
6517 : (mode_3
6518 ? BFD_RELOC_ARM_HWLITERAL
6519 : BFD_RELOC_ARM_LITERAL));
6520 return 0;
6521 }
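
/* Illustrative outcomes of the pseudo-load handling above, in ARM state:
   "ldr r0, =0x100" can become "mov r0, #0x100" because
   encode_arm_immediate succeeds; "ldr r0, =0xffffff00" can become
   "mvn r0, #0xff" via the inverted form; constants that both encoders
   reject fall through to a PC-relative literal pool load. */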
6522
6523 /* Functions for instruction encoding, sorted by sub-architecture.
6524 First some generics; their names are taken from the conventional
6525 bit positions for register arguments in ARM format instructions. */
6526
6527 static void
6528 do_noargs (void)
6529 {
6530 }
6531
6532 static void
6533 do_rd (void)
6534 {
6535 inst.instruction |= inst.operands[0].reg << 12;
6536 }
6537
6538 static void
6539 do_rd_rm (void)
6540 {
6541 inst.instruction |= inst.operands[0].reg << 12;
6542 inst.instruction |= inst.operands[1].reg;
6543 }
6544
6545 static void
6546 do_rd_rn (void)
6547 {
6548 inst.instruction |= inst.operands[0].reg << 12;
6549 inst.instruction |= inst.operands[1].reg << 16;
6550 }
6551
6552 static void
6553 do_rn_rd (void)
6554 {
6555 inst.instruction |= inst.operands[0].reg << 16;
6556 inst.instruction |= inst.operands[1].reg << 12;
6557 }
6558
6559 static void
6560 do_rd_rm_rn (void)
6561 {
6562 unsigned Rn = inst.operands[2].reg;
6563 /* Enforce restrictions on SWP instruction. */
6564 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6565 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6566 _("Rn must not overlap other operands"));
6567 inst.instruction |= inst.operands[0].reg << 12;
6568 inst.instruction |= inst.operands[1].reg;
6569 inst.instruction |= Rn << 16;
6570 }
6571
6572 static void
6573 do_rd_rn_rm (void)
6574 {
6575 inst.instruction |= inst.operands[0].reg << 12;
6576 inst.instruction |= inst.operands[1].reg << 16;
6577 inst.instruction |= inst.operands[2].reg;
6578 }
6579
6580 static void
6581 do_rm_rd_rn (void)
6582 {
6583 inst.instruction |= inst.operands[0].reg;
6584 inst.instruction |= inst.operands[1].reg << 12;
6585 inst.instruction |= inst.operands[2].reg << 16;
6586 }
6587
6588 static void
6589 do_imm0 (void)
6590 {
6591 inst.instruction |= inst.operands[0].imm;
6592 }
6593
6594 static void
6595 do_rd_cpaddr (void)
6596 {
6597 inst.instruction |= inst.operands[0].reg << 12;
6598 encode_arm_cp_address (1, TRUE, TRUE, 0);
6599 }
6600
6601 /* ARM instructions, in alphabetical order by function name (except
6602 that wrapper functions appear immediately after the function they
6603 wrap). */
6604
6605 /* This is a pseudo-op of the form "adr rd, label" to be converted
6606 into a relative address of the form "add rd, pc, #label-.-8". */
6607
6608 static void
6609 do_adr (void)
6610 {
6611 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6612
6613 /* Frag hacking will turn this into a sub instruction if the offset turns
6614 out to be negative. */
6615 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6616 inst.reloc.pc_rel = 1;
6617 inst.reloc.exp.X_add_number -= 8;
6618 }
6619
6620 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6621 into a relative address of the form:
6622 add rd, pc, #low(label-.-8)"
6623 add rd, rd, #high(label-.-8)" */
6624
6625 static void
6626 do_adrl (void)
6627 {
6628 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6629
6630 /* Frag hacking will turn this into a sub instruction if the offset turns
6631 out to be negative. */
6632 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6633 inst.reloc.pc_rel = 1;
6634 inst.size = INSN_SIZE * 2;
6635 inst.reloc.exp.X_add_number -= 8;
6636 }
6637
6638 static void
6639 do_arit (void)
6640 {
6641 if (!inst.operands[1].present)
6642 inst.operands[1].reg = inst.operands[0].reg;
6643 inst.instruction |= inst.operands[0].reg << 12;
6644 inst.instruction |= inst.operands[1].reg << 16;
6645 encode_arm_shifter_operand (2);
6646 }
6647
6648 static void
6649 do_barrier (void)
6650 {
6651 if (inst.operands[0].present)
6652 {
6653 constraint ((inst.instruction & 0xf0) != 0x40
6654 && inst.operands[0].imm != 0xf,
6655 _("bad barrier type"));
6656 inst.instruction |= inst.operands[0].imm;
6657 }
6658 else
6659 inst.instruction |= 0xf;
6660 }
6661
6662 static void
6663 do_bfc (void)
6664 {
6665 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6666 constraint (msb > 32, _("bit-field extends past end of register"));
6667 /* The instruction encoding stores the LSB and MSB,
6668 not the LSB and width. */
6669 inst.instruction |= inst.operands[0].reg << 12;
6670 inst.instruction |= inst.operands[1].imm << 7;
6671 inst.instruction |= (msb - 1) << 16;
6672 }
6673
6674 static void
6675 do_bfi (void)
6676 {
6677 unsigned int msb;
6678
6679 /* #0 in second position is alternative syntax for bfc, which is
6680 the same instruction but with REG_PC in the Rm field. */
6681 if (!inst.operands[1].isreg)
6682 inst.operands[1].reg = REG_PC;
6683
6684 msb = inst.operands[2].imm + inst.operands[3].imm;
6685 constraint (msb > 32, _("bit-field extends past end of register"));
6686 /* The instruction encoding stores the LSB and MSB,
6687 not the LSB and width. */
6688 inst.instruction |= inst.operands[0].reg << 12;
6689 inst.instruction |= inst.operands[1].reg;
6690 inst.instruction |= inst.operands[2].imm << 7;
6691 inst.instruction |= (msb - 1) << 16;
6692 }
6693
6694 static void
6695 do_bfx (void)
6696 {
6697 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6698 _("bit-field extends past end of register"));
6699 inst.instruction |= inst.operands[0].reg << 12;
6700 inst.instruction |= inst.operands[1].reg;
6701 inst.instruction |= inst.operands[2].imm << 7;
6702 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6703 }
6704
6705 /* ARM V5 breakpoint instruction (argument parse)
6706 BKPT <16 bit unsigned immediate>
6707 Instruction is not conditional.
6708 The bit pattern given in insns[] has the COND_ALWAYS condition,
6709 and it is an error if the caller tried to override that. */
6710
6711 static void
6712 do_bkpt (void)
6713 {
6714 /* Top 12 of 16 bits to bits 19:8. */
6715 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6716
6717 /* Bottom 4 of 16 bits to bits 3:0. */
6718 inst.instruction |= inst.operands[0].imm & 0xf;
6719 }
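
/* For instance, "bkpt 0xfedc" splits as (0xfedc & 0xfff0) << 4, placing
   0xfed in bits 19:8, with the remaining nibble 0xc in bits 3:0. */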
6720
6721 static void
6722 encode_branch (int default_reloc)
6723 {
6724 if (inst.operands[0].hasreloc)
6725 {
6726 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6727 _("the only suffix valid here is '(plt)'"));
6728 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6729 }
6730 else
6731 {
6732 inst.reloc.type = default_reloc;
6733 }
6734 inst.reloc.pc_rel = 1;
6735 }
6736
6737 static void
6738 do_branch (void)
6739 {
6740 #ifdef OBJ_ELF
6741 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6742 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6743 else
6744 #endif
6745 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6746 }
6747
6748 static void
6749 do_bl (void)
6750 {
6751 #ifdef OBJ_ELF
6752 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6753 {
6754 if (inst.cond == COND_ALWAYS)
6755 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6756 else
6757 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6758 }
6759 else
6760 #endif
6761 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6762 }
6763
6764 /* ARM V5 branch-link-exchange instruction (argument parse)
6765 BLX <target_addr> ie BLX(1)
6766 BLX{<condition>} <Rm> ie BLX(2)
6767 Unfortunately, there are two different opcodes for this mnemonic.
6768 So, the insns[].value is not used, and the code here zaps values
6769 into inst.instruction.
6770 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6771
6772 static void
6773 do_blx (void)
6774 {
6775 if (inst.operands[0].isreg)
6776 {
6777 /* Arg is a register; the opcode provided by insns[] is correct.
6778 It is not illegal to do "blx pc", just useless. */
6779 if (inst.operands[0].reg == REG_PC)
6780 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6781
6782 inst.instruction |= inst.operands[0].reg;
6783 }
6784 else
6785 {
6786 /* Arg is an address; this instruction cannot be executed
6787 conditionally, and the opcode must be adjusted. */
6788 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6789 inst.instruction = 0xfa000000;
6790 #ifdef OBJ_ELF
6791 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6792 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6793 else
6794 #endif
6795 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6796 }
6797 }
6798
6799 static void
6800 do_bx (void)
6801 {
6802 bfd_boolean want_reloc;
6803
6804 if (inst.operands[0].reg == REG_PC)
6805 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6806
6807 inst.instruction |= inst.operands[0].reg;
6808 /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
6809 it is for ARMv4t or earlier. */
6810 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
6811 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
6812 want_reloc = TRUE;
6813
6814 #ifdef OBJ_ELF
6815 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
6816 #endif
6817 want_reloc = FALSE;
6818
6819 if (want_reloc)
6820 inst.reloc.type = BFD_RELOC_ARM_V4BX;
6821 }
6822
6823
6824 /* ARM v5TEJ. Jump to Jazelle code. */
6825
6826 static void
6827 do_bxj (void)
6828 {
6829 if (inst.operands[0].reg == REG_PC)
6830 as_tsktsk (_("use of r15 in bxj is not really useful"));
6831
6832 inst.instruction |= inst.operands[0].reg;
6833 }
6834
6835 /* Co-processor data operation:
6836 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6837 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6838 static void
6839 do_cdp (void)
6840 {
6841 inst.instruction |= inst.operands[0].reg << 8;
6842 inst.instruction |= inst.operands[1].imm << 20;
6843 inst.instruction |= inst.operands[2].reg << 12;
6844 inst.instruction |= inst.operands[3].reg << 16;
6845 inst.instruction |= inst.operands[4].reg;
6846 inst.instruction |= inst.operands[5].imm << 5;
6847 }
6848
6849 static void
6850 do_cmp (void)
6851 {
6852 inst.instruction |= inst.operands[0].reg << 16;
6853 encode_arm_shifter_operand (1);
6854 }
6855
6856 /* Transfer between coprocessor and ARM registers.
6857 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6858 MRC2
6859 MCR{cond}
6860 MCR2
6861
6862 No special properties. */
6863
6864 static void
6865 do_co_reg (void)
6866 {
6867 unsigned Rd;
6868
6869 Rd = inst.operands[2].reg;
6870 if (thumb_mode)
6871 {
6872 if (inst.instruction == 0xee000010
6873 || inst.instruction == 0xfe000010)
6874 /* MCR, MCR2 */
6875 reject_bad_reg (Rd);
6876 else
6877 /* MRC, MRC2 */
6878 constraint (Rd == REG_SP, BAD_SP);
6879 }
6880 else
6881 {
6882 /* MCR */
6883 if (inst.instruction == 0xe000010)
6884 constraint (Rd == REG_PC, BAD_PC);
6885 }
6886
6887
6888 inst.instruction |= inst.operands[0].reg << 8;
6889 inst.instruction |= inst.operands[1].imm << 21;
6890 inst.instruction |= Rd << 12;
6891 inst.instruction |= inst.operands[3].reg << 16;
6892 inst.instruction |= inst.operands[4].reg;
6893 inst.instruction |= inst.operands[5].imm << 5;
6894 }
6895
6896 /* Transfer between coprocessor register and pair of ARM registers.
6897 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6898 MCRR2
6899 MRRC{cond}
6900 MRRC2
6901
6902 Two XScale instructions are special cases of these:
6903
6904 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6905 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6906
6907 Result unpredictable if Rd or Rn is R15. */
6908
6909 static void
6910 do_co_reg2c (void)
6911 {
6912 unsigned Rd, Rn;
6913
6914 Rd = inst.operands[2].reg;
6915 Rn = inst.operands[3].reg;
6916
6917 if (thumb_mode)
6918 {
6919 reject_bad_reg (Rd);
6920 reject_bad_reg (Rn);
6921 }
6922 else
6923 {
6924 constraint (Rd == REG_PC, BAD_PC);
6925 constraint (Rn == REG_PC, BAD_PC);
6926 }
6927
6928 inst.instruction |= inst.operands[0].reg << 8;
6929 inst.instruction |= inst.operands[1].imm << 4;
6930 inst.instruction |= Rd << 12;
6931 inst.instruction |= Rn << 16;
6932 inst.instruction |= inst.operands[4].reg;
6933 }
6934
6935 static void
6936 do_cpsi (void)
6937 {
6938 inst.instruction |= inst.operands[0].imm << 6;
6939 if (inst.operands[1].present)
6940 {
6941 inst.instruction |= CPSI_MMOD;
6942 inst.instruction |= inst.operands[1].imm;
6943 }
6944 }
6945
6946 static void
6947 do_dbg (void)
6948 {
6949 inst.instruction |= inst.operands[0].imm;
6950 }
6951
6952 static void
6953 do_it (void)
6954 {
6955 /* There is no IT instruction in ARM mode. We
6956 process it but do not generate code for it. */
6957 inst.size = 0;
6958 }
6959
6960 static void
6961 do_ldmstm (void)
6962 {
6963 int base_reg = inst.operands[0].reg;
6964 int range = inst.operands[1].imm;
6965
6966 inst.instruction |= base_reg << 16;
6967 inst.instruction |= range;
6968
6969 if (inst.operands[1].writeback)
6970 inst.instruction |= LDM_TYPE_2_OR_3;
6971
6972 if (inst.operands[0].writeback)
6973 {
6974 inst.instruction |= WRITE_BACK;
6975 /* Check for unpredictable uses of writeback. */
6976 if (inst.instruction & LOAD_BIT)
6977 {
6978 /* Not allowed in LDM type 2. */
6979 if ((inst.instruction & LDM_TYPE_2_OR_3)
6980 && ((range & (1 << REG_PC)) == 0))
6981 as_warn (_("writeback of base register is UNPREDICTABLE"));
6982 /* Only allowed if base reg not in list for other types. */
6983 else if (range & (1 << base_reg))
6984 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6985 }
6986 else /* STM. */
6987 {
6988 /* Not allowed for type 2. */
6989 if (inst.instruction & LDM_TYPE_2_OR_3)
6990 as_warn (_("writeback of base register is UNPREDICTABLE"));
6991 /* Only allowed if base reg not in list, or first in list. */
6992 else if ((range & (1 << base_reg))
6993 && (range & ((1 << base_reg) - 1)))
6994 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6995 }
6996 }
6997 }
6998
6999 /* ARMv5TE load-consecutive (argument parse)
7000 Mode is like LDRH.
7001
7002 LDRccD R, mode
7003 STRccD R, mode. */
7004
7005 static void
7006 do_ldrd (void)
7007 {
7008 constraint (inst.operands[0].reg % 2 != 0,
7009 _("first destination register must be even"));
7010 constraint (inst.operands[1].present
7011 && inst.operands[1].reg != inst.operands[0].reg + 1,
7012 _("can only load two consecutive registers"));
7013 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7014 constraint (!inst.operands[2].isreg, _("'[' expected"));
7015
7016 if (!inst.operands[1].present)
7017 inst.operands[1].reg = inst.operands[0].reg + 1;
7018
7019 if (inst.instruction & LOAD_BIT)
7020 {
7021 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7022 register and the first register written; we have to diagnose
7023 overlap between the base and the second register written here. */
7024
7025 if (inst.operands[2].reg == inst.operands[1].reg
7026 && (inst.operands[2].writeback || inst.operands[2].postind))
7027 as_warn (_("base register written back, and overlaps "
7028 "second destination register"));
7029
7030 /* For an index-register load, the index register must not overlap the
7031 destination (even if not write-back). */
7032 else if (inst.operands[2].immisreg
7033 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7034 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7035 as_warn (_("index register overlaps destination register"));
7036 }
7037
7038 inst.instruction |= inst.operands[0].reg << 12;
7039 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7040 }
7041
7042 static void
7043 do_ldrex (void)
7044 {
7045 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7046 || inst.operands[1].postind || inst.operands[1].writeback
7047 || inst.operands[1].immisreg || inst.operands[1].shifted
7048 || inst.operands[1].negative
7049 /* This can arise if the programmer has written
7050 strex rN, rM, foo
7051 or if they have mistakenly used a register name as the last
7052 operand, e.g.:
7053 strex rN, rM, rX
7054 It is very difficult to distinguish between these two cases
7055 because "rX" might actually be a label. ie the register
7056 name has been occluded by a symbol of the same name. So we
7057 just generate a general 'bad addressing mode' type error
7058 message and leave it up to the programmer to discover the
7059 true cause and fix their mistake. */
7060 || (inst.operands[1].reg == REG_PC),
7061 BAD_ADDR_MODE);
7062
7063 constraint (inst.reloc.exp.X_op != O_constant
7064 || inst.reloc.exp.X_add_number != 0,
7065 _("offset must be zero in ARM encoding"));
7066
7067 inst.instruction |= inst.operands[0].reg << 12;
7068 inst.instruction |= inst.operands[1].reg << 16;
7069 inst.reloc.type = BFD_RELOC_UNUSED;
7070 }
7071
7072 static void
7073 do_ldrexd (void)
7074 {
7075 constraint (inst.operands[0].reg % 2 != 0,
7076 _("even register required"));
7077 constraint (inst.operands[1].present
7078 && inst.operands[1].reg != inst.operands[0].reg + 1,
7079 _("can only load two consecutive registers"));
7080 /* If op 1 were present and equal to PC, this function wouldn't
7081 have been called in the first place. */
7082 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7083
7084 inst.instruction |= inst.operands[0].reg << 12;
7085 inst.instruction |= inst.operands[2].reg << 16;
7086 }
7087
7088 static void
7089 do_ldst (void)
7090 {
7091 inst.instruction |= inst.operands[0].reg << 12;
7092 if (!inst.operands[1].isreg)
7093 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7094 return;
7095 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7096 }
7097
7098 static void
7099 do_ldstt (void)
7100 {
7101 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7102 reject [Rn,...]. */
7103 if (inst.operands[1].preind)
7104 {
7105 constraint (inst.reloc.exp.X_op != O_constant
7106 || inst.reloc.exp.X_add_number != 0,
7107 _("this instruction requires a post-indexed address"));
7108
7109 inst.operands[1].preind = 0;
7110 inst.operands[1].postind = 1;
7111 inst.operands[1].writeback = 1;
7112 }
7113 inst.instruction |= inst.operands[0].reg << 12;
7114 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7115 }
7116
7117 /* Halfword and signed-byte load/store operations. */
7118
7119 static void
7120 do_ldstv4 (void)
7121 {
7122 inst.instruction |= inst.operands[0].reg << 12;
7123 if (!inst.operands[1].isreg)
7124 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7125 return;
7126 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7127 }
7128
7129 static void
7130 do_ldsttv4 (void)
7131 {
7132 /* ldrht/strht (and friends) always use post-indexed addressing. Turn
7133 [Rn] into [Rn]! and reject [Rn,...]. */
7134 if (inst.operands[1].preind)
7135 {
7136 constraint (inst.reloc.exp.X_op != O_constant
7137 || inst.reloc.exp.X_add_number != 0,
7138 _("this instruction requires a post-indexed address"));
7139
7140 inst.operands[1].preind = 0;
7141 inst.operands[1].postind = 1;
7142 inst.operands[1].writeback = 1;
7143 }
7144 inst.instruction |= inst.operands[0].reg << 12;
7145 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7146 }
7147
7148 /* Co-processor register load/store.
7149 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7150 static void
7151 do_lstc (void)
7152 {
7153 inst.instruction |= inst.operands[0].reg << 8;
7154 inst.instruction |= inst.operands[1].reg << 12;
7155 encode_arm_cp_address (2, TRUE, TRUE, 0);
7156 }
7157
7158 static void
7159 do_mlas (void)
7160 {
7161 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7162 if (inst.operands[0].reg == inst.operands[1].reg
7163 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7164 && !(inst.instruction & 0x00400000))
7165 as_tsktsk (_("Rd and Rm should be different in mla"));
7166
7167 inst.instruction |= inst.operands[0].reg << 16;
7168 inst.instruction |= inst.operands[1].reg;
7169 inst.instruction |= inst.operands[2].reg << 8;
7170 inst.instruction |= inst.operands[3].reg << 12;
7171 }
7172
7173 static void
7174 do_mov (void)
7175 {
7176 inst.instruction |= inst.operands[0].reg << 12;
7177 encode_arm_shifter_operand (1);
7178 }
7179
7180 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7181 static void
7182 do_mov16 (void)
7183 {
7184 bfd_vma imm;
7185 bfd_boolean top;
7186
7187 top = (inst.instruction & 0x00400000) != 0;
7188 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7189 _(":lower16: not allowed this instruction"));
7190 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7191 _(":upper16: not allowed instruction"));
7192 inst.instruction |= inst.operands[0].reg << 12;
7193 if (inst.reloc.type == BFD_RELOC_UNUSED)
7194 {
7195 imm = inst.reloc.exp.X_add_number;
7196 /* The value is in two pieces: 0:11, 16:19. */
7197 inst.instruction |= (imm & 0x00000fff);
7198 inst.instruction |= (imm & 0x0000f000) << 4;
7199 }
7200 }
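
/* Example of the split encoding above: with no relocation pending and
   imm = 0x1234, bits 11:0 receive 0x234 and the top nibble 0x1 lands in
   bits 19:16 via (imm & 0x0000f000) << 4. */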
7201
7202 static void do_vfp_nsyn_opcode (const char *);
7203
7204 static int
7205 do_vfp_nsyn_mrs (void)
7206 {
7207 if (inst.operands[0].isvec)
7208 {
7209 if (inst.operands[1].reg != 1)
7210 first_error (_("operand 1 must be FPSCR"));
7211 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7212 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7213 do_vfp_nsyn_opcode ("fmstat");
7214 }
7215 else if (inst.operands[1].isvec)
7216 do_vfp_nsyn_opcode ("fmrx");
7217 else
7218 return FAIL;
7219
7220 return SUCCESS;
7221 }
7222
7223 static int
7224 do_vfp_nsyn_msr (void)
7225 {
7226 if (inst.operands[0].isvec)
7227 do_vfp_nsyn_opcode ("fmxr");
7228 else
7229 return FAIL;
7230
7231 return SUCCESS;
7232 }
7233
7234 static void
7235 do_mrs (void)
7236 {
7237 if (do_vfp_nsyn_mrs () == SUCCESS)
7238 return;
7239
7240 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7241 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7242 != (PSR_c|PSR_f),
7243 _("'CPSR' or 'SPSR' expected"));
7244 inst.instruction |= inst.operands[0].reg << 12;
7245 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7246 }
7247
7248 /* Two possible forms:
7249 "{C|S}PSR_<field>, Rm",
7250 "{C|S}PSR_f, #expression". */
7251
7252 static void
7253 do_msr (void)
7254 {
7255 if (do_vfp_nsyn_msr () == SUCCESS)
7256 return;
7257
7258 inst.instruction |= inst.operands[0].imm;
7259 if (inst.operands[1].isreg)
7260 inst.instruction |= inst.operands[1].reg;
7261 else
7262 {
7263 inst.instruction |= INST_IMMEDIATE;
7264 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7265 inst.reloc.pc_rel = 0;
7266 }
7267 }
7268
7269 static void
7270 do_mul (void)
7271 {
7272 if (!inst.operands[2].present)
7273 inst.operands[2].reg = inst.operands[0].reg;
7274 inst.instruction |= inst.operands[0].reg << 16;
7275 inst.instruction |= inst.operands[1].reg;
7276 inst.instruction |= inst.operands[2].reg << 8;
7277
7278 if (inst.operands[0].reg == inst.operands[1].reg
7279 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7280 as_tsktsk (_("Rd and Rm should be different in mul"));
7281 }
7282
7283 /* Long Multiply Parser
7284 UMULL RdLo, RdHi, Rm, Rs
7285 SMULL RdLo, RdHi, Rm, Rs
7286 UMLAL RdLo, RdHi, Rm, Rs
7287 SMLAL RdLo, RdHi, Rm, Rs. */
7288
7289 static void
7290 do_mull (void)
7291 {
7292 inst.instruction |= inst.operands[0].reg << 12;
7293 inst.instruction |= inst.operands[1].reg << 16;
7294 inst.instruction |= inst.operands[2].reg;
7295 inst.instruction |= inst.operands[3].reg << 8;
7296
7297 /* rdhi and rdlo must be different. */
7298 if (inst.operands[0].reg == inst.operands[1].reg)
7299 as_tsktsk (_("rdhi and rdlo must be different"));
7300
7301 /* rdhi, rdlo and rm must all be different before armv6. */
7302 if ((inst.operands[0].reg == inst.operands[2].reg
7303 || inst.operands[1].reg == inst.operands[2].reg)
7304 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7305 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7306 }
7307
7308 static void
7309 do_nop (void)
7310 {
7311 if (inst.operands[0].present)
7312 {
7313 /* Architectural NOP hints are CPSR sets with no bits selected. */
7314 inst.instruction &= 0xf0000000;
7315 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7316 }
7317 }
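
/* The hints reachable through the operand above include "nop #0"
   (0x0320f000 itself) and, on cores that implement them, the values 1-4,
   which select YIELD, WFE, WFI and SEV respectively. */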
7318
7319 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7320 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7321 Condition defaults to COND_ALWAYS.
7322 Error if Rd, Rn or Rm are R15. */
7323
7324 static void
7325 do_pkhbt (void)
7326 {
7327 inst.instruction |= inst.operands[0].reg << 12;
7328 inst.instruction |= inst.operands[1].reg << 16;
7329 inst.instruction |= inst.operands[2].reg;
7330 if (inst.operands[3].present)
7331 encode_arm_shift (3);
7332 }
7333
7334 /* ARM V6 PKHTB (Argument Parse). */
7335
7336 static void
7337 do_pkhtb (void)
7338 {
7339 if (!inst.operands[3].present)
7340 {
7341 /* If the shift specifier is omitted, turn the instruction
7342 into pkhbt rd, rm, rn. */
7343 inst.instruction &= 0xfff00010;
7344 inst.instruction |= inst.operands[0].reg << 12;
7345 inst.instruction |= inst.operands[1].reg;
7346 inst.instruction |= inst.operands[2].reg << 16;
7347 }
7348 else
7349 {
7350 inst.instruction |= inst.operands[0].reg << 12;
7351 inst.instruction |= inst.operands[1].reg << 16;
7352 inst.instruction |= inst.operands[2].reg;
7353 encode_arm_shift (3);
7354 }
7355 }
7356
7357 /* ARMv5TE: Preload-Cache
7358
7359 PLD <addr_mode>
7360
7361 Syntactically, like LDR with B=1, W=0, L=1. */
7362
7363 static void
7364 do_pld (void)
7365 {
7366 constraint (!inst.operands[0].isreg,
7367 _("'[' expected after PLD mnemonic"));
7368 constraint (inst.operands[0].postind,
7369 _("post-indexed expression used in preload instruction"));
7370 constraint (inst.operands[0].writeback,
7371 _("writeback used in preload instruction"));
7372 constraint (!inst.operands[0].preind,
7373 _("unindexed addressing used in preload instruction"));
7374 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7375 }
7376
7377 /* ARMv7: PLI <addr_mode> */
7378 static void
7379 do_pli (void)
7380 {
7381 constraint (!inst.operands[0].isreg,
7382 _("'[' expected after PLI mnemonic"));
7383 constraint (inst.operands[0].postind,
7384 _("post-indexed expression used in preload instruction"));
7385 constraint (inst.operands[0].writeback,
7386 _("writeback used in preload instruction"));
7387 constraint (!inst.operands[0].preind,
7388 _("unindexed addressing used in preload instruction"));
7389 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7390 inst.instruction &= ~PRE_INDEX;
7391 }
7392
7393 static void
7394 do_push_pop (void)
7395 {
7396 inst.operands[1] = inst.operands[0];
7397 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7398 inst.operands[0].isreg = 1;
7399 inst.operands[0].writeback = 1;
7400 inst.operands[0].reg = REG_SP;
7401 do_ldmstm ();
7402 }
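
/* In effect, "push {r4, lr}" is treated as if it had been written
   "stmfd sp!, {r4, lr}": the register list moves to operand 1 and a
   write-back SP base is synthesized as operand 0 before do_ldmstm runs. */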
7403
7404 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7405 word at the specified address and the following word
7406 respectively.
7407 Unconditionally executed.
7408 Error if Rn is R15. */
7409
7410 static void
7411 do_rfe (void)
7412 {
7413 inst.instruction |= inst.operands[0].reg << 16;
7414 if (inst.operands[0].writeback)
7415 inst.instruction |= WRITE_BACK;
7416 }
7417
7418 /* ARM V6 ssat (argument parse). */
7419
7420 static void
7421 do_ssat (void)
7422 {
7423 inst.instruction |= inst.operands[0].reg << 12;
7424 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7425 inst.instruction |= inst.operands[2].reg;
7426
7427 if (inst.operands[3].present)
7428 encode_arm_shift (3);
7429 }
7430
7431 /* ARM V6 usat (argument parse). */
7432
7433 static void
7434 do_usat (void)
7435 {
7436 inst.instruction |= inst.operands[0].reg << 12;
7437 inst.instruction |= inst.operands[1].imm << 16;
7438 inst.instruction |= inst.operands[2].reg;
7439
7440 if (inst.operands[3].present)
7441 encode_arm_shift (3);
7442 }
7443
7444 /* ARM V6 ssat16 (argument parse). */
7445
7446 static void
7447 do_ssat16 (void)
7448 {
7449 inst.instruction |= inst.operands[0].reg << 12;
7450 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7451 inst.instruction |= inst.operands[2].reg;
7452 }
7453
7454 static void
7455 do_usat16 (void)
7456 {
7457 inst.instruction |= inst.operands[0].reg << 12;
7458 inst.instruction |= inst.operands[1].imm << 16;
7459 inst.instruction |= inst.operands[2].reg;
7460 }
7461
7462 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7463 preserving the other bits.
7464
7465 setend <endian_specifier>, where <endian_specifier> is either
7466 BE or LE. */
7467
7468 static void
7469 do_setend (void)
7470 {
7471 if (inst.operands[0].imm)
7472 inst.instruction |= 0x200;
7473 }
7474
7475 static void
7476 do_shift (void)
7477 {
7478 unsigned int Rm = (inst.operands[1].present
7479 ? inst.operands[1].reg
7480 : inst.operands[0].reg);
7481
7482 inst.instruction |= inst.operands[0].reg << 12;
7483 inst.instruction |= Rm;
7484 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7485 {
7486 inst.instruction |= inst.operands[2].reg << 8;
7487 inst.instruction |= SHIFT_BY_REG;
7488 }
7489 else
7490 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7491 }
7492
7493 static void
7494 do_smc (void)
7495 {
7496 inst.reloc.type = BFD_RELOC_ARM_SMC;
7497 inst.reloc.pc_rel = 0;
7498 }
7499
7500 static void
7501 do_swi (void)
7502 {
7503 inst.reloc.type = BFD_RELOC_ARM_SWI;
7504 inst.reloc.pc_rel = 0;
7505 }
7506
7507 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7508 SMLAxy{cond} Rd,Rm,Rs,Rn
7509 SMLAWy{cond} Rd,Rm,Rs,Rn
7510 Error if any register is R15. */
7511
7512 static void
7513 do_smla (void)
7514 {
7515 inst.instruction |= inst.operands[0].reg << 16;
7516 inst.instruction |= inst.operands[1].reg;
7517 inst.instruction |= inst.operands[2].reg << 8;
7518 inst.instruction |= inst.operands[3].reg << 12;
7519 }
7520
7521 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7522 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7523 Error if any register is R15.
7524 Warning if Rdlo == Rdhi. */
7525
7526 static void
7527 do_smlal (void)
7528 {
7529 inst.instruction |= inst.operands[0].reg << 12;
7530 inst.instruction |= inst.operands[1].reg << 16;
7531 inst.instruction |= inst.operands[2].reg;
7532 inst.instruction |= inst.operands[3].reg << 8;
7533
7534 if (inst.operands[0].reg == inst.operands[1].reg)
7535 as_tsktsk (_("rdhi and rdlo must be different"));
7536 }
7537
7538 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7539 SMULxy{cond} Rd,Rm,Rs
7540 Error if any register is R15. */
7541
7542 static void
7543 do_smul (void)
7544 {
7545 inst.instruction |= inst.operands[0].reg << 16;
7546 inst.instruction |= inst.operands[1].reg;
7547 inst.instruction |= inst.operands[2].reg << 8;
7548 }
7549
7550 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7551 the same for both ARM and Thumb-2. */
7552
7553 static void
7554 do_srs (void)
7555 {
7556 int reg;
7557
7558 if (inst.operands[0].present)
7559 {
7560 reg = inst.operands[0].reg;
7561 constraint (reg != REG_SP, _("SRS base register must be r13"));
7562 }
7563 else
7564 reg = REG_SP;
7565
7566 inst.instruction |= reg << 16;
7567 inst.instruction |= inst.operands[1].imm;
7568 if (inst.operands[0].writeback || inst.operands[1].writeback)
7569 inst.instruction |= WRITE_BACK;
7570 }
7571
7572 /* ARM V6 strex (argument parse). */
7573
7574 static void
7575 do_strex (void)
7576 {
7577 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7578 || inst.operands[2].postind || inst.operands[2].writeback
7579 || inst.operands[2].immisreg || inst.operands[2].shifted
7580 || inst.operands[2].negative
7581 /* See comment in do_ldrex(). */
7582 || (inst.operands[2].reg == REG_PC),
7583 BAD_ADDR_MODE);
7584
7585 constraint (inst.operands[0].reg == inst.operands[1].reg
7586 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7587
7588 constraint (inst.reloc.exp.X_op != O_constant
7589 || inst.reloc.exp.X_add_number != 0,
7590 _("offset must be zero in ARM encoding"));
7591
7592 inst.instruction |= inst.operands[0].reg << 12;
7593 inst.instruction |= inst.operands[1].reg;
7594 inst.instruction |= inst.operands[2].reg << 16;
7595 inst.reloc.type = BFD_RELOC_UNUSED;
7596 }
7597
7598 static void
7599 do_strexd (void)
7600 {
7601 constraint (inst.operands[1].reg % 2 != 0,
7602 _("even register required"));
7603 constraint (inst.operands[2].present
7604 && inst.operands[2].reg != inst.operands[1].reg + 1,
7605 _("can only store two consecutive registers"));
7606 /* If op 2 were present and equal to PC, this function wouldn't
7607 have been called in the first place. */
7608 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7609
7610 constraint (inst.operands[0].reg == inst.operands[1].reg
7611 || inst.operands[0].reg == inst.operands[1].reg + 1
7612 || inst.operands[0].reg == inst.operands[3].reg,
7613 BAD_OVERLAP);
7614
7615 inst.instruction |= inst.operands[0].reg << 12;
7616 inst.instruction |= inst.operands[1].reg;
7617 inst.instruction |= inst.operands[3].reg << 16;
7618 }
7619
7620 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign-extends
7621 it to 32 bits, and adds the result to a value in another
7622 register. You can specify a rotation by 0, 8, 16, or 24 bits
7623 before extracting the 16-bit value.
7624 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7625 Condition defaults to COND_ALWAYS.
7626 Error if any register is R15. */
7627
7628 static void
7629 do_sxtah (void)
7630 {
7631 inst.instruction |= inst.operands[0].reg << 12;
7632 inst.instruction |= inst.operands[1].reg << 16;
7633 inst.instruction |= inst.operands[2].reg;
7634 inst.instruction |= inst.operands[3].imm << 10;
7635 }
7636
7637 /* ARM V6 SXTH.
7638
7639 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7640 Condition defaults to COND_ALWAYS.
7641 Error if any register is R15. */
7642
7643 static void
7644 do_sxth (void)
7645 {
7646 inst.instruction |= inst.operands[0].reg << 12;
7647 inst.instruction |= inst.operands[1].reg;
7648 inst.instruction |= inst.operands[2].imm << 10;
7649 }
7650 \f
7651 /* VFP instructions. In a logical order: SP variant first, monad
7652 before dyad, arithmetic then move then load/store. */
7653
7654 static void
7655 do_vfp_sp_monadic (void)
7656 {
7657 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7658 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7659 }
7660
7661 static void
7662 do_vfp_sp_dyadic (void)
7663 {
7664 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7665 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7666 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7667 }
7668
7669 static void
7670 do_vfp_sp_compare_z (void)
7671 {
7672 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7673 }
7674
7675 static void
7676 do_vfp_dp_sp_cvt (void)
7677 {
7678 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7679 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7680 }
7681
7682 static void
7683 do_vfp_sp_dp_cvt (void)
7684 {
7685 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7686 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7687 }
7688
7689 static void
7690 do_vfp_reg_from_sp (void)
7691 {
7692 inst.instruction |= inst.operands[0].reg << 12;
7693 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7694 }
7695
7696 static void
7697 do_vfp_reg2_from_sp2 (void)
7698 {
7699 constraint (inst.operands[2].imm != 2,
7700 _("only two consecutive VFP SP registers allowed here"));
7701 inst.instruction |= inst.operands[0].reg << 12;
7702 inst.instruction |= inst.operands[1].reg << 16;
7703 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7704 }
7705
7706 static void
7707 do_vfp_sp_from_reg (void)
7708 {
7709 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7710 inst.instruction |= inst.operands[1].reg << 12;
7711 }
7712
7713 static void
7714 do_vfp_sp2_from_reg2 (void)
7715 {
7716 constraint (inst.operands[0].imm != 2,
7717 _("only two consecutive VFP SP registers allowed here"));
7718 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7719 inst.instruction |= inst.operands[1].reg << 12;
7720 inst.instruction |= inst.operands[2].reg << 16;
7721 }
7722
7723 static void
7724 do_vfp_sp_ldst (void)
7725 {
7726 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7727 encode_arm_cp_address (1, FALSE, TRUE, 0);
7728 }
7729
7730 static void
7731 do_vfp_dp_ldst (void)
7732 {
7733 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7734 encode_arm_cp_address (1, FALSE, TRUE, 0);
7735 }
7736
7737
7738 static void
7739 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7740 {
7741 if (inst.operands[0].writeback)
7742 inst.instruction |= WRITE_BACK;
7743 else
7744 constraint (ldstm_type != VFP_LDSTMIA,
7745 _("this addressing mode requires base-register writeback"));
7746 inst.instruction |= inst.operands[0].reg << 16;
7747 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7748 inst.instruction |= inst.operands[1].imm;
7749 }
7750
7751 static void
7752 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7753 {
7754 int count;
7755
7756 if (inst.operands[0].writeback)
7757 inst.instruction |= WRITE_BACK;
7758 else
7759 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7760 _("this addressing mode requires base-register writeback"));
7761
7762 inst.instruction |= inst.operands[0].reg << 16;
7763 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7764
7765 count = inst.operands[1].imm << 1;
7766 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7767 count += 1;
7768
7769 inst.instruction |= count;
7770 }
7771
7772 static void
7773 do_vfp_sp_ldstmia (void)
7774 {
7775 vfp_sp_ldstm (VFP_LDSTMIA);
7776 }
7777
7778 static void
7779 do_vfp_sp_ldstmdb (void)
7780 {
7781 vfp_sp_ldstm (VFP_LDSTMDB);
7782 }
7783
7784 static void
7785 do_vfp_dp_ldstmia (void)
7786 {
7787 vfp_dp_ldstm (VFP_LDSTMIA);
7788 }
7789
7790 static void
7791 do_vfp_dp_ldstmdb (void)
7792 {
7793 vfp_dp_ldstm (VFP_LDSTMDB);
7794 }
7795
7796 static void
7797 do_vfp_xp_ldstmia (void)
7798 {
7799 vfp_dp_ldstm (VFP_LDSTMIAX);
7800 }
7801
7802 static void
7803 do_vfp_xp_ldstmdb (void)
7804 {
7805 vfp_dp_ldstm (VFP_LDSTMDBX);
7806 }
7807
7808 static void
7809 do_vfp_dp_rd_rm (void)
7810 {
7811 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7812 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7813 }
7814
7815 static void
7816 do_vfp_dp_rn_rd (void)
7817 {
7818 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7819 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7820 }
7821
7822 static void
7823 do_vfp_dp_rd_rn (void)
7824 {
7825 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7826 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7827 }
7828
7829 static void
7830 do_vfp_dp_rd_rn_rm (void)
7831 {
7832 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7833 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7834 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7835 }
7836
7837 static void
7838 do_vfp_dp_rd (void)
7839 {
7840 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7841 }
7842
7843 static void
7844 do_vfp_dp_rm_rd_rn (void)
7845 {
7846 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7847 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7848 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7849 }
7850
7851 /* VFPv3 instructions. */
7852 static void
7853 do_vfp_sp_const (void)
7854 {
7855 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7856 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7857 inst.instruction |= (inst.operands[1].imm & 0x0f);
7858 }
7859
7860 static void
7861 do_vfp_dp_const (void)
7862 {
7863 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7864 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7865 inst.instruction |= (inst.operands[1].imm & 0x0f);
7866 }
7867
7868 static void
7869 vfp_conv (int srcsize)
7870 {
7871 unsigned immbits = srcsize - inst.operands[1].imm;
7872 inst.instruction |= (immbits & 1) << 5;
7873 inst.instruction |= (immbits >> 1);
7874 }
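
/* A sketch of the fixed-point field computed above: a 32-bit conversion
   with 16 fraction bits gives immbits = 32 - 16 = 16, so bit 5 receives
   immbits & 1 = 0 and the remaining bits receive immbits >> 1 = 8. */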
7875
7876 static void
7877 do_vfp_sp_conv_16 (void)
7878 {
7879 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7880 vfp_conv (16);
7881 }
7882
7883 static void
7884 do_vfp_dp_conv_16 (void)
7885 {
7886 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7887 vfp_conv (16);
7888 }
7889
7890 static void
7891 do_vfp_sp_conv_32 (void)
7892 {
7893 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7894 vfp_conv (32);
7895 }
7896
7897 static void
7898 do_vfp_dp_conv_32 (void)
7899 {
7900 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7901 vfp_conv (32);
7902 }
7903 \f
7904 /* FPA instructions. Also in a logical order. */
7905
7906 static void
7907 do_fpa_cmp (void)
7908 {
7909 inst.instruction |= inst.operands[0].reg << 16;
7910 inst.instruction |= inst.operands[1].reg;
7911 }
7912
7913 static void
7914 do_fpa_ldmstm (void)
7915 {
7916 inst.instruction |= inst.operands[0].reg << 12;
7917 switch (inst.operands[1].imm)
7918 {
7919 case 1: inst.instruction |= CP_T_X; break;
7920 case 2: inst.instruction |= CP_T_Y; break;
7921 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7922 case 4: break;
7923 default: abort ();
7924 }
7925
7926 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7927 {
7928 /* The instruction specified "ea" or "fd", so we can only accept
7929 [Rn]{!}. The instruction does not really support stacking or
7930 unstacking, so we have to emulate these by setting appropriate
7931 bits and offsets. */
7932 constraint (inst.reloc.exp.X_op != O_constant
7933 || inst.reloc.exp.X_add_number != 0,
7934 _("this instruction does not support indexing"));
7935
7936 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7937 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7938
7939 if (!(inst.instruction & INDEX_UP))
7940 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7941
7942 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7943 {
7944 inst.operands[2].preind = 0;
7945 inst.operands[2].postind = 1;
7946 }
7947 }
7948
7949 encode_arm_cp_address (2, TRUE, TRUE, 0);
7950 }
7951 \f
7952 /* iWMMXt instructions: strictly in alphabetical order. */
7953
7954 static void
7955 do_iwmmxt_tandorc (void)
7956 {
7957 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7958 }
7959
7960 static void
7961 do_iwmmxt_textrc (void)
7962 {
7963 inst.instruction |= inst.operands[0].reg << 12;
7964 inst.instruction |= inst.operands[1].imm;
7965 }
7966
7967 static void
7968 do_iwmmxt_textrm (void)
7969 {
7970 inst.instruction |= inst.operands[0].reg << 12;
7971 inst.instruction |= inst.operands[1].reg << 16;
7972 inst.instruction |= inst.operands[2].imm;
7973 }
7974
7975 static void
7976 do_iwmmxt_tinsr (void)
7977 {
7978 inst.instruction |= inst.operands[0].reg << 16;
7979 inst.instruction |= inst.operands[1].reg << 12;
7980 inst.instruction |= inst.operands[2].imm;
7981 }
7982
7983 static void
7984 do_iwmmxt_tmia (void)
7985 {
7986 inst.instruction |= inst.operands[0].reg << 5;
7987 inst.instruction |= inst.operands[1].reg;
7988 inst.instruction |= inst.operands[2].reg << 12;
7989 }
7990
7991 static void
7992 do_iwmmxt_waligni (void)
7993 {
7994 inst.instruction |= inst.operands[0].reg << 12;
7995 inst.instruction |= inst.operands[1].reg << 16;
7996 inst.instruction |= inst.operands[2].reg;
7997 inst.instruction |= inst.operands[3].imm << 20;
7998 }
7999
8000 static void
8001 do_iwmmxt_wmerge (void)
8002 {
8003 inst.instruction |= inst.operands[0].reg << 12;
8004 inst.instruction |= inst.operands[1].reg << 16;
8005 inst.instruction |= inst.operands[2].reg;
8006 inst.instruction |= inst.operands[3].imm << 21;
8007 }
8008
8009 static void
8010 do_iwmmxt_wmov (void)
8011 {
8012 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8013 inst.instruction |= inst.operands[0].reg << 12;
8014 inst.instruction |= inst.operands[1].reg << 16;
8015 inst.instruction |= inst.operands[1].reg;
8016 }
8017
8018 static void
8019 do_iwmmxt_wldstbh (void)
8020 {
8021 int reloc;
8022 inst.instruction |= inst.operands[0].reg << 12;
8023 if (thumb_mode)
8024 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8025 else
8026 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8027 encode_arm_cp_address (1, TRUE, FALSE, reloc);
8028 }
8029
8030 static void
8031 do_iwmmxt_wldstw (void)
8032 {
8033 /* RIWR_RIWC clears .isreg for a control register. */
8034 if (!inst.operands[0].isreg)
8035 {
8036 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8037 inst.instruction |= 0xf0000000;
8038 }
8039
8040 inst.instruction |= inst.operands[0].reg << 12;
8041 encode_arm_cp_address (1, TRUE, TRUE, 0);
8042 }
8043
8044 static void
8045 do_iwmmxt_wldstd (void)
8046 {
8047 inst.instruction |= inst.operands[0].reg << 12;
8048 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
8049 && inst.operands[1].immisreg)
8050 {
8051 inst.instruction &= ~0x1a000ff;
8052 inst.instruction |= (0xf << 28);
8053 if (inst.operands[1].preind)
8054 inst.instruction |= PRE_INDEX;
8055 if (!inst.operands[1].negative)
8056 inst.instruction |= INDEX_UP;
8057 if (inst.operands[1].writeback)
8058 inst.instruction |= WRITE_BACK;
8059 inst.instruction |= inst.operands[1].reg << 16;
8060 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8061 inst.instruction |= inst.operands[1].imm;
8062 }
8063 else
8064 encode_arm_cp_address (1, TRUE, FALSE, 0);
8065 }
8066
8067 static void
8068 do_iwmmxt_wshufh (void)
8069 {
8070 inst.instruction |= inst.operands[0].reg << 12;
8071 inst.instruction |= inst.operands[1].reg << 16;
8072 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8073 inst.instruction |= (inst.operands[2].imm & 0x0f);
8074 }
8075
8076 static void
8077 do_iwmmxt_wzero (void)
8078 {
8079 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8080 inst.instruction |= inst.operands[0].reg;
8081 inst.instruction |= inst.operands[0].reg << 12;
8082 inst.instruction |= inst.operands[0].reg << 16;
8083 }
8084
8085 static void
8086 do_iwmmxt_wrwrwr_or_imm5 (void)
8087 {
8088 if (inst.operands[2].isreg)
8089 do_rd_rn_rm ();
8090 else {
8091 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8092 _("immediate operand requires iWMMXt2"));
8093 do_rd_rn ();
8094 if (inst.operands[2].imm == 0)
8095 {
8096 switch ((inst.instruction >> 20) & 0xf)
8097 {
8098 case 4:
8099 case 5:
8100 case 6:
8101 case 7:
8102 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8103 inst.operands[2].imm = 16;
8104 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8105 break;
8106 case 8:
8107 case 9:
8108 case 10:
8109 case 11:
8110 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8111 inst.operands[2].imm = 32;
8112 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8113 break;
8114 case 12:
8115 case 13:
8116 case 14:
8117 case 15:
8118 {
8119 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8120 unsigned long wrn;
8121 wrn = (inst.instruction >> 16) & 0xf;
8122 inst.instruction &= 0xff0fff0f;
8123 inst.instruction |= wrn;
8124 /* Bail out here; the instruction is now assembled. */
8125 return;
8126 }
8127 }
8128 }
8129 /* Map 32 -> 0, etc. */
8130 inst.operands[2].imm &= 0x1f;
8131 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8132 }
8133 }
8134 \f
8135 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8136 operations first, then control, shift, and load/store. */
8137
8138 /* Insns like "foo X,Y,Z". */
8139
8140 static void
8141 do_mav_triple (void)
8142 {
8143 inst.instruction |= inst.operands[0].reg << 16;
8144 inst.instruction |= inst.operands[1].reg;
8145 inst.instruction |= inst.operands[2].reg << 12;
8146 }
8147
8148 /* Insns like "foo W,X,Y,Z",
8149 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8150
8151 static void
8152 do_mav_quad (void)
8153 {
8154 inst.instruction |= inst.operands[0].reg << 5;
8155 inst.instruction |= inst.operands[1].reg << 12;
8156 inst.instruction |= inst.operands[2].reg << 16;
8157 inst.instruction |= inst.operands[3].reg;
8158 }
8159
8160 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8161 static void
8162 do_mav_dspsc (void)
8163 {
8164 inst.instruction |= inst.operands[1].reg << 12;
8165 }
8166
8167 /* Maverick shift immediate instructions.
8168 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8169 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8170
8171 static void
8172 do_mav_shift (void)
8173 {
8174 int imm = inst.operands[2].imm;
8175
8176 inst.instruction |= inst.operands[0].reg << 12;
8177 inst.instruction |= inst.operands[1].reg << 16;
8178
8179 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8180 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8181 Bit 4 should be 0. */
8182 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8183
8184 inst.instruction |= imm;
8185 }
8186 \f
8187 /* XScale instructions. Also sorted arithmetic before move. */
8188
8189 /* Xscale multiply-accumulate (argument parse)
8190 MIAcc acc0,Rm,Rs
8191 MIAPHcc acc0,Rm,Rs
8192 MIAxycc acc0,Rm,Rs. */
8193
8194 static void
8195 do_xsc_mia (void)
8196 {
8197 inst.instruction |= inst.operands[1].reg;
8198 inst.instruction |= inst.operands[2].reg << 12;
8199 }
8200
8201 /* Xscale move-accumulator-register (argument parse)
8202
8203 MARcc acc0,RdLo,RdHi. */
8204
8205 static void
8206 do_xsc_mar (void)
8207 {
8208 inst.instruction |= inst.operands[1].reg << 12;
8209 inst.instruction |= inst.operands[2].reg << 16;
8210 }
8211
8212 /* Xscale move-register-accumulator (argument parse)
8213
8214 MRAcc RdLo,RdHi,acc0. */
8215
8216 static void
8217 do_xsc_mra (void)
8218 {
8219 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8220 inst.instruction |= inst.operands[0].reg << 12;
8221 inst.instruction |= inst.operands[1].reg << 16;
8222 }
8223 \f
8224 /* Encoding functions relevant only to Thumb. */
8225
8226 /* inst.operands[i] is a shifted-register operand; encode
8227 it into inst.instruction in the format used by Thumb32. */
8228
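/* In the 32-bit data-processing (shifted register) format the shift
   type goes in bits 4-5 and the 5-bit amount is split into imm3
   (bits 12-14) and imm2 (bits 6-7).  An amount of 32 is written as 0
   for LSR/ASR, RRX is ROR with a zero amount, and LSL #0 means "no
   shift"; the special cases below arrange exactly that.  */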
8229 static void
8230 encode_thumb32_shifted_operand (int i)
8231 {
8232 unsigned int value = inst.reloc.exp.X_add_number;
8233 unsigned int shift = inst.operands[i].shift_kind;
8234
8235 constraint (inst.operands[i].immisreg,
8236 _("shift by register not allowed in thumb mode"));
8237 inst.instruction |= inst.operands[i].reg;
8238 if (shift == SHIFT_RRX)
8239 inst.instruction |= SHIFT_ROR << 4;
8240 else
8241 {
8242 constraint (inst.reloc.exp.X_op != O_constant,
8243 _("expression too complex"));
8244
8245 constraint (value > 32
8246 || (value == 32 && (shift == SHIFT_LSL
8247 || shift == SHIFT_ROR)),
8248 _("shift expression is too large"));
8249
8250 if (value == 0)
8251 shift = SHIFT_LSL;
8252 else if (value == 32)
8253 value = 0;
8254
8255 inst.instruction |= shift << 4;
8256 inst.instruction |= (value & 0x1c) << 10;
8257 inst.instruction |= (value & 0x03) << 6;
8258 }
8259 }
8260
8261
8262 /* inst.operands[i] was set up by parse_address. Encode it into a
8263 Thumb32 format load or store instruction. Reject forms that cannot
8264 be used with such instructions. If is_t is true, reject forms that
8265 cannot be used with a T instruction; if is_d is true, reject forms
8266 that cannot be used with a D instruction. */
8267
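/* The magic constants used below follow the T32 load/store layouts:
   in the imm8 encodings bits 8-11 hold 1:P:U:W, so 0x00000c00 selects
   a pre-indexed access without writeback, 0x00000900 a post-indexed
   access with writeback, and 0x00000100 adds the W bit.  The
   doubleword (D) forms use P (bit 24) and W (bit 21) instead.  The
   add/subtract (U) bit is left to be filled in when the offset is
   fixed up.  */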
8268 static void
8269 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8270 {
8271 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8272
8273 constraint (!inst.operands[i].isreg,
8274 _("Instruction does not support =N addresses"));
8275
8276 inst.instruction |= inst.operands[i].reg << 16;
8277 if (inst.operands[i].immisreg)
8278 {
8279 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8280 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8281 constraint (inst.operands[i].negative,
8282 _("Thumb does not support negative register indexing"));
8283 constraint (inst.operands[i].postind,
8284 _("Thumb does not support register post-indexing"));
8285 constraint (inst.operands[i].writeback,
8286 _("Thumb does not support register indexing with writeback"));
8287 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8288 _("Thumb supports only LSL in shifted register indexing"));
8289
8290 inst.instruction |= inst.operands[i].imm;
8291 if (inst.operands[i].shifted)
8292 {
8293 constraint (inst.reloc.exp.X_op != O_constant,
8294 _("expression too complex"));
8295 constraint (inst.reloc.exp.X_add_number < 0
8296 || inst.reloc.exp.X_add_number > 3,
8297 _("shift out of range"));
8298 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8299 }
8300 inst.reloc.type = BFD_RELOC_UNUSED;
8301 }
8302 else if (inst.operands[i].preind)
8303 {
8304 constraint (is_pc && inst.operands[i].writeback,
8305 _("cannot use writeback with PC-relative addressing"));
8306 constraint (is_t && inst.operands[i].writeback,
8307 _("cannot use writeback with this instruction"));
8308
8309 if (is_d)
8310 {
8311 inst.instruction |= 0x01000000;
8312 if (inst.operands[i].writeback)
8313 inst.instruction |= 0x00200000;
8314 }
8315 else
8316 {
8317 inst.instruction |= 0x00000c00;
8318 if (inst.operands[i].writeback)
8319 inst.instruction |= 0x00000100;
8320 }
8321 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8322 }
8323 else if (inst.operands[i].postind)
8324 {
8325 assert (inst.operands[i].writeback);
8326 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8327 constraint (is_t, _("cannot use post-indexing with this instruction"));
8328
8329 if (is_d)
8330 inst.instruction |= 0x00200000;
8331 else
8332 inst.instruction |= 0x00000900;
8333 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8334 }
8335 else /* unindexed - only for coprocessor */
8336 inst.error = _("instruction does not accept unindexed addressing");
8337 }
8338
8339 /* Table of Thumb instructions which exist in both 16- and 32-bit
8340 encodings (the latter only in post-V6T2 cores). The index is the
8341 value used in the insns table below. When there is more than one
8342 possible 16-bit encoding for the instruction, this table always
8343 holds variant (1).
8344 Also contains several pseudo-instructions used during relaxation. */
8345 #define T16_32_TAB \
8346 X(adc, 4140, eb400000), \
8347 X(adcs, 4140, eb500000), \
8348 X(add, 1c00, eb000000), \
8349 X(adds, 1c00, eb100000), \
8350 X(addi, 0000, f1000000), \
8351 X(addis, 0000, f1100000), \
8352 X(add_pc,000f, f20f0000), \
8353 X(add_sp,000d, f10d0000), \
8354 X(adr, 000f, f20f0000), \
8355 X(and, 4000, ea000000), \
8356 X(ands, 4000, ea100000), \
8357 X(asr, 1000, fa40f000), \
8358 X(asrs, 1000, fa50f000), \
8359 X(b, e000, f000b000), \
8360 X(bcond, d000, f0008000), \
8361 X(bic, 4380, ea200000), \
8362 X(bics, 4380, ea300000), \
8363 X(cmn, 42c0, eb100f00), \
8364 X(cmp, 2800, ebb00f00), \
8365 X(cpsie, b660, f3af8400), \
8366 X(cpsid, b670, f3af8600), \
8367 X(cpy, 4600, ea4f0000), \
8368 X(dec_sp,80dd, f1ad0d00), \
8369 X(eor, 4040, ea800000), \
8370 X(eors, 4040, ea900000), \
8371 X(inc_sp,00dd, f10d0d00), \
8372 X(ldmia, c800, e8900000), \
8373 X(ldr, 6800, f8500000), \
8374 X(ldrb, 7800, f8100000), \
8375 X(ldrh, 8800, f8300000), \
8376 X(ldrsb, 5600, f9100000), \
8377 X(ldrsh, 5e00, f9300000), \
8378 X(ldr_pc,4800, f85f0000), \
8379 X(ldr_pc2,4800, f85f0000), \
8380 X(ldr_sp,9800, f85d0000), \
8381 X(lsl, 0000, fa00f000), \
8382 X(lsls, 0000, fa10f000), \
8383 X(lsr, 0800, fa20f000), \
8384 X(lsrs, 0800, fa30f000), \
8385 X(mov, 2000, ea4f0000), \
8386 X(movs, 2000, ea5f0000), \
8387 X(mul, 4340, fb00f000), \
8388 X(muls, 4340, ffffffff), /* no 32b muls */ \
8389 X(mvn, 43c0, ea6f0000), \
8390 X(mvns, 43c0, ea7f0000), \
8391 X(neg, 4240, f1c00000), /* rsb #0 */ \
8392 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8393 X(orr, 4300, ea400000), \
8394 X(orrs, 4300, ea500000), \
8395 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8396 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8397 X(rev, ba00, fa90f080), \
8398 X(rev16, ba40, fa90f090), \
8399 X(revsh, bac0, fa90f0b0), \
8400 X(ror, 41c0, fa60f000), \
8401 X(rors, 41c0, fa70f000), \
8402 X(sbc, 4180, eb600000), \
8403 X(sbcs, 4180, eb700000), \
8404 X(stmia, c000, e8800000), \
8405 X(str, 6000, f8400000), \
8406 X(strb, 7000, f8000000), \
8407 X(strh, 8000, f8200000), \
8408 X(str_sp,9000, f84d0000), \
8409 X(sub, 1e00, eba00000), \
8410 X(subs, 1e00, ebb00000), \
8411 X(subi, 8000, f1a00000), \
8412 X(subis, 8000, f1b00000), \
8413 X(sxtb, b240, fa4ff080), \
8414 X(sxth, b200, fa0ff080), \
8415 X(tst, 4200, ea100f00), \
8416 X(uxtb, b2c0, fa5ff080), \
8417 X(uxth, b280, fa1ff080), \
8418 X(nop, bf00, f3af8000), \
8419 X(yield, bf10, f3af8001), \
8420 X(wfe, bf20, f3af8002), \
8421 X(wfi, bf30, f3af8003), \
8422   X(sev,   bf40, f3af8004),
8423
8424 /* To catch errors in encoding functions, the codes are all offset by
8425 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8426 as 16-bit instructions. */
8427 #define X(a,b,c) T_MNEM_##a
8428 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8429 #undef X
8430
8431 #define X(a,b,c) 0x##b
8432 static const unsigned short thumb_op16[] = { T16_32_TAB };
8433 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8434 #undef X
8435
8436 #define X(a,b,c) 0x##c
8437 static const unsigned int thumb_op32[] = { T16_32_TAB };
8438 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8439 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8440 #undef X
8441 #undef T16_32_TAB
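/* With this arrangement T_MNEM_adc is 0xF800, T_MNEM_adcs 0xF801 and
   so on, and THUMB_OP16/THUMB_OP32 simply index the parallel arrays
   with (code - 0xF800); e.g. THUMB_OP16 (T_MNEM_add) is 0x1c00 and
   THUMB_OP32 (T_MNEM_add) is 0xeb000000.  THUMB_SETS_FLAGS tests bit
   20, the S bit of the 32-bit data-processing encodings.  The 32-bit
   entries are the register forms; several encoders below convert one
   into the matching modified-immediate form with
   (op & 0xe1ffffff) | 0x10000000, e.g. 0xea400000 (ORR reg) becomes
   0xf0400000 (ORR imm).  */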
8442
8443 /* Thumb instruction encoders, in alphabetical order. */
8444
8445 /* ADDW or SUBW. */
8446 static void
8447 do_t_add_sub_w (void)
8448 {
8449 int Rd, Rn;
8450
8451 Rd = inst.operands[0].reg;
8452 Rn = inst.operands[1].reg;
8453
8454 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this is the
8455      SP-{plus,minus}-immediate form of the instruction. */
8456 reject_bad_reg (Rd);
8457
8458 inst.instruction |= (Rn << 16) | (Rd << 8);
8459 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8460 }
8461
8462 /* Parse an add or subtract instruction. We get here with inst.instruction
8463 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8464
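/* The plain three-low-register and immediate 16-bit encodings set the
   flags when used outside an IT block and leave them alone inside
   one, so "narrow" is derived from current_it_mask according to
   whether the mnemonic is adds/subs.  The SP-, PC- and high-register
   forms never set flags and are handled separately.  */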
8465 static void
8466 do_t_add_sub (void)
8467 {
8468 int Rd, Rs, Rn;
8469
8470 Rd = inst.operands[0].reg;
8471 Rs = (inst.operands[1].present
8472 ? inst.operands[1].reg /* Rd, Rs, foo */
8473 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8474
8475 if (unified_syntax)
8476 {
8477 bfd_boolean flags;
8478 bfd_boolean narrow;
8479 int opcode;
8480
8481 flags = (inst.instruction == T_MNEM_adds
8482 || inst.instruction == T_MNEM_subs);
8483 if (flags)
8484 narrow = (current_it_mask == 0);
8485 else
8486 narrow = (current_it_mask != 0);
8487 if (!inst.operands[2].isreg)
8488 {
8489 int add;
8490
8491 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8492
8493 add = (inst.instruction == T_MNEM_add
8494 || inst.instruction == T_MNEM_adds);
8495 opcode = 0;
8496 if (inst.size_req != 4)
8497 {
8498 /* Attempt to use a narrow opcode, with relaxation if
8499 appropriate. */
8500 if (Rd == REG_SP && Rs == REG_SP && !flags)
8501 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8502 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8503 opcode = T_MNEM_add_sp;
8504 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8505 opcode = T_MNEM_add_pc;
8506 else if (Rd <= 7 && Rs <= 7 && narrow)
8507 {
8508 if (flags)
8509 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8510 else
8511 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8512 }
8513 if (opcode)
8514 {
8515 inst.instruction = THUMB_OP16(opcode);
8516 inst.instruction |= (Rd << 4) | Rs;
8517 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8518 if (inst.size_req != 2)
8519 inst.relax = opcode;
8520 }
8521 else
8522 constraint (inst.size_req == 2, BAD_HIREG);
8523 }
8524 if (inst.size_req == 4
8525 || (inst.size_req != 2 && !opcode))
8526 {
8527 if (Rd == REG_PC)
8528 {
8529 constraint (add, BAD_PC);
8530 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8531 _("only SUBS PC, LR, #const allowed"));
8532 constraint (inst.reloc.exp.X_op != O_constant,
8533 _("expression too complex"));
8534 constraint (inst.reloc.exp.X_add_number < 0
8535 || inst.reloc.exp.X_add_number > 0xff,
8536 _("immediate value out of range"));
8537 inst.instruction = T2_SUBS_PC_LR
8538 | inst.reloc.exp.X_add_number;
8539 inst.reloc.type = BFD_RELOC_UNUSED;
8540 return;
8541 }
8542 else if (Rs == REG_PC)
8543 {
8544 /* Always use addw/subw. */
8545 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8546 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8547 }
8548 else
8549 {
8550 inst.instruction = THUMB_OP32 (inst.instruction);
8551 inst.instruction = (inst.instruction & 0xe1ffffff)
8552 | 0x10000000;
8553 if (flags)
8554 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8555 else
8556 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8557 }
8558 inst.instruction |= Rd << 8;
8559 inst.instruction |= Rs << 16;
8560 }
8561 }
8562 else
8563 {
8564 Rn = inst.operands[2].reg;
8565 /* See if we can do this with a 16-bit instruction. */
8566 if (!inst.operands[2].shifted && inst.size_req != 4)
8567 {
8568 if (Rd > 7 || Rs > 7 || Rn > 7)
8569 narrow = FALSE;
8570
8571 if (narrow)
8572 {
8573 inst.instruction = ((inst.instruction == T_MNEM_adds
8574 || inst.instruction == T_MNEM_add)
8575 ? T_OPCODE_ADD_R3
8576 : T_OPCODE_SUB_R3);
8577 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8578 return;
8579 }
8580
8581 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
8582 {
8583 /* Thumb-1 cores (except v6-M) require at least one high
8584 register in a narrow non flag setting add. */
8585 if (Rd > 7 || Rn > 7
8586 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
8587 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
8588 {
8589 if (Rd == Rn)
8590 {
8591 Rn = Rs;
8592 Rs = Rd;
8593 }
8594 inst.instruction = T_OPCODE_ADD_HI;
8595 inst.instruction |= (Rd & 8) << 4;
8596 inst.instruction |= (Rd & 7);
8597 inst.instruction |= Rn << 3;
8598 return;
8599 }
8600 }
8601 }
8602
8603 constraint (Rd == REG_PC, BAD_PC);
8604 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8605 constraint (Rs == REG_PC, BAD_PC);
8606 reject_bad_reg (Rn);
8607
8608 /* If we get here, it can't be done in 16 bits. */
8609 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8610 _("shift must be constant"));
8611 inst.instruction = THUMB_OP32 (inst.instruction);
8612 inst.instruction |= Rd << 8;
8613 inst.instruction |= Rs << 16;
8614 encode_thumb32_shifted_operand (2);
8615 }
8616 }
8617 else
8618 {
8619 constraint (inst.instruction == T_MNEM_adds
8620 || inst.instruction == T_MNEM_subs,
8621 BAD_THUMB32);
8622
8623 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8624 {
8625 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8626 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8627 BAD_HIREG);
8628
8629 inst.instruction = (inst.instruction == T_MNEM_add
8630 ? 0x0000 : 0x8000);
8631 inst.instruction |= (Rd << 4) | Rs;
8632 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8633 return;
8634 }
8635
8636 Rn = inst.operands[2].reg;
8637 constraint (inst.operands[2].shifted, _("unshifted register required"));
8638
8639 /* We now have Rd, Rs, and Rn set to registers. */
8640 if (Rd > 7 || Rs > 7 || Rn > 7)
8641 {
8642 /* Can't do this for SUB. */
8643 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8644 inst.instruction = T_OPCODE_ADD_HI;
8645 inst.instruction |= (Rd & 8) << 4;
8646 inst.instruction |= (Rd & 7);
8647 if (Rs == Rd)
8648 inst.instruction |= Rn << 3;
8649 else if (Rn == Rd)
8650 inst.instruction |= Rs << 3;
8651 else
8652 constraint (1, _("dest must overlap one source register"));
8653 }
8654 else
8655 {
8656 inst.instruction = (inst.instruction == T_MNEM_add
8657 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8658 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8659 }
8660 }
8661 }
8662
8663 static void
8664 do_t_adr (void)
8665 {
8666 unsigned Rd;
8667
8668 Rd = inst.operands[0].reg;
8669 reject_bad_reg (Rd);
8670
8671 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
8672 {
8673 /* Defer to section relaxation. */
8674 inst.relax = inst.instruction;
8675 inst.instruction = THUMB_OP16 (inst.instruction);
8676 inst.instruction |= Rd << 4;
8677 }
8678 else if (unified_syntax && inst.size_req != 2)
8679 {
8680 /* Generate a 32-bit opcode. */
8681 inst.instruction = THUMB_OP32 (inst.instruction);
8682 inst.instruction |= Rd << 8;
8683 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8684 inst.reloc.pc_rel = 1;
8685 }
8686 else
8687 {
8688 /* Generate a 16-bit opcode. */
8689 inst.instruction = THUMB_OP16 (inst.instruction);
8690 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8691 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8692 inst.reloc.pc_rel = 1;
8693
8694 inst.instruction |= Rd << 4;
8695 }
8696 }
8697
8698 /* Arithmetic instructions for which there is just one 16-bit
8699 instruction encoding, and it allows only two low registers.
8700 For maximal compatibility with ARM syntax, we allow three register
8701 operands even when Thumb-32 instructions are not available, as long
8702 as the first two are identical. For instance, both "sbc r0,r1" and
8703 "sbc r0,r0,r1" are allowed. */
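/* The 16-bit encodings of these operations have only two register
   fields (Rdn and Rm), so a narrow encoding is possible only when the
   destination is the same as the first source; otherwise the 32-bit
   encoding is used.  */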
8704 static void
8705 do_t_arit3 (void)
8706 {
8707 int Rd, Rs, Rn;
8708
8709 Rd = inst.operands[0].reg;
8710 Rs = (inst.operands[1].present
8711 ? inst.operands[1].reg /* Rd, Rs, foo */
8712 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8713 Rn = inst.operands[2].reg;
8714
8715 reject_bad_reg (Rd);
8716 reject_bad_reg (Rs);
8717 if (inst.operands[2].isreg)
8718 reject_bad_reg (Rn);
8719
8720 if (unified_syntax)
8721 {
8722 if (!inst.operands[2].isreg)
8723 {
8724 /* For an immediate, we always generate a 32-bit opcode;
8725 section relaxation will shrink it later if possible. */
8726 inst.instruction = THUMB_OP32 (inst.instruction);
8727 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8728 inst.instruction |= Rd << 8;
8729 inst.instruction |= Rs << 16;
8730 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8731 }
8732 else
8733 {
8734 bfd_boolean narrow;
8735
8736 /* See if we can do this with a 16-bit instruction. */
8737 if (THUMB_SETS_FLAGS (inst.instruction))
8738 narrow = current_it_mask == 0;
8739 else
8740 narrow = current_it_mask != 0;
8741
8742 if (Rd > 7 || Rn > 7 || Rs > 7)
8743 narrow = FALSE;
8744 if (inst.operands[2].shifted)
8745 narrow = FALSE;
8746 if (inst.size_req == 4)
8747 narrow = FALSE;
8748
8749 if (narrow
8750 && Rd == Rs)
8751 {
8752 inst.instruction = THUMB_OP16 (inst.instruction);
8753 inst.instruction |= Rd;
8754 inst.instruction |= Rn << 3;
8755 return;
8756 }
8757
8758 /* If we get here, it can't be done in 16 bits. */
8759 constraint (inst.operands[2].shifted
8760 && inst.operands[2].immisreg,
8761 _("shift must be constant"));
8762 inst.instruction = THUMB_OP32 (inst.instruction);
8763 inst.instruction |= Rd << 8;
8764 inst.instruction |= Rs << 16;
8765 encode_thumb32_shifted_operand (2);
8766 }
8767 }
8768 else
8769 {
8770 /* On its face this is a lie - the instruction does set the
8771 flags. However, the only supported mnemonic in this mode
8772 says it doesn't. */
8773 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8774
8775 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8776 _("unshifted register required"));
8777 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8778 constraint (Rd != Rs,
8779 _("dest and source1 must be the same register"));
8780
8781 inst.instruction = THUMB_OP16 (inst.instruction);
8782 inst.instruction |= Rd;
8783 inst.instruction |= Rn << 3;
8784 }
8785 }
8786
8787 /* Similarly, but for instructions where the arithmetic operation is
8788 commutative, so we can allow either of them to be different from
8789 the destination operand in a 16-bit instruction. For instance, all
8790 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8791 accepted. */
8792 static void
8793 do_t_arit3c (void)
8794 {
8795 int Rd, Rs, Rn;
8796
8797 Rd = inst.operands[0].reg;
8798 Rs = (inst.operands[1].present
8799 ? inst.operands[1].reg /* Rd, Rs, foo */
8800 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8801 Rn = inst.operands[2].reg;
8802
8803 reject_bad_reg (Rd);
8804 reject_bad_reg (Rs);
8805 if (inst.operands[2].isreg)
8806 reject_bad_reg (Rn);
8807
8808 if (unified_syntax)
8809 {
8810 if (!inst.operands[2].isreg)
8811 {
8812 /* For an immediate, we always generate a 32-bit opcode;
8813 section relaxation will shrink it later if possible. */
8814 inst.instruction = THUMB_OP32 (inst.instruction);
8815 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8816 inst.instruction |= Rd << 8;
8817 inst.instruction |= Rs << 16;
8818 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8819 }
8820 else
8821 {
8822 bfd_boolean narrow;
8823
8824 /* See if we can do this with a 16-bit instruction. */
8825 if (THUMB_SETS_FLAGS (inst.instruction))
8826 narrow = current_it_mask == 0;
8827 else
8828 narrow = current_it_mask != 0;
8829
8830 if (Rd > 7 || Rn > 7 || Rs > 7)
8831 narrow = FALSE;
8832 if (inst.operands[2].shifted)
8833 narrow = FALSE;
8834 if (inst.size_req == 4)
8835 narrow = FALSE;
8836
8837 if (narrow)
8838 {
8839 if (Rd == Rs)
8840 {
8841 inst.instruction = THUMB_OP16 (inst.instruction);
8842 inst.instruction |= Rd;
8843 inst.instruction |= Rn << 3;
8844 return;
8845 }
8846 if (Rd == Rn)
8847 {
8848 inst.instruction = THUMB_OP16 (inst.instruction);
8849 inst.instruction |= Rd;
8850 inst.instruction |= Rs << 3;
8851 return;
8852 }
8853 }
8854
8855 /* If we get here, it can't be done in 16 bits. */
8856 constraint (inst.operands[2].shifted
8857 && inst.operands[2].immisreg,
8858 _("shift must be constant"));
8859 inst.instruction = THUMB_OP32 (inst.instruction);
8860 inst.instruction |= Rd << 8;
8861 inst.instruction |= Rs << 16;
8862 encode_thumb32_shifted_operand (2);
8863 }
8864 }
8865 else
8866 {
8867 /* On its face this is a lie - the instruction does set the
8868 flags. However, the only supported mnemonic in this mode
8869 says it doesn't. */
8870 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8871
8872 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8873 _("unshifted register required"));
8874 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8875
8876 inst.instruction = THUMB_OP16 (inst.instruction);
8877 inst.instruction |= Rd;
8878
8879 if (Rd == Rs)
8880 inst.instruction |= Rn << 3;
8881 else if (Rd == Rn)
8882 inst.instruction |= Rs << 3;
8883 else
8884 constraint (1, _("dest must overlap one source register"));
8885 }
8886 }
8887
8888 static void
8889 do_t_barrier (void)
8890 {
8891 if (inst.operands[0].present)
8892 {
8893 constraint ((inst.instruction & 0xf0) != 0x40
8894 && inst.operands[0].imm != 0xf,
8895 _("bad barrier type"));
8896 inst.instruction |= inst.operands[0].imm;
8897 }
8898 else
8899 inst.instruction |= 0xf;
8900 }
8901
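/* BFC/BFI and SBFX/UBFX put the LSB of the field in imm3:imm2
   (bits 12-14 and 6-7).  BFC/BFI store the MSB, i.e. lsb + width - 1,
   in bits 0-4, whereas SBFX/UBFX store width - 1 there.  For example
   "bfi r0, r1, #8, #4" stores imm3 = 2, imm2 = 0 and msb = 11.  */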
8902 static void
8903 do_t_bfc (void)
8904 {
8905 unsigned Rd;
8906 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8907 constraint (msb > 32, _("bit-field extends past end of register"));
8908 /* The instruction encoding stores the LSB and MSB,
8909 not the LSB and width. */
8910 Rd = inst.operands[0].reg;
8911 reject_bad_reg (Rd);
8912 inst.instruction |= Rd << 8;
8913 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8914 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8915 inst.instruction |= msb - 1;
8916 }
8917
8918 static void
8919 do_t_bfi (void)
8920 {
8921 int Rd, Rn;
8922 unsigned int msb;
8923
8924 Rd = inst.operands[0].reg;
8925 reject_bad_reg (Rd);
8926
8927 /* #0 in second position is alternative syntax for bfc, which is
8928 the same instruction but with REG_PC in the Rm field. */
8929 if (!inst.operands[1].isreg)
8930 Rn = REG_PC;
8931 else
8932 {
8933 Rn = inst.operands[1].reg;
8934 reject_bad_reg (Rn);
8935 }
8936
8937 msb = inst.operands[2].imm + inst.operands[3].imm;
8938 constraint (msb > 32, _("bit-field extends past end of register"));
8939 /* The instruction encoding stores the LSB and MSB,
8940 not the LSB and width. */
8941 inst.instruction |= Rd << 8;
8942 inst.instruction |= Rn << 16;
8943 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8944 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8945 inst.instruction |= msb - 1;
8946 }
8947
8948 static void
8949 do_t_bfx (void)
8950 {
8951 unsigned Rd, Rn;
8952
8953 Rd = inst.operands[0].reg;
8954 Rn = inst.operands[1].reg;
8955
8956 reject_bad_reg (Rd);
8957 reject_bad_reg (Rn);
8958
8959 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8960 _("bit-field extends past end of register"));
8961 inst.instruction |= Rd << 8;
8962 inst.instruction |= Rn << 16;
8963 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8964 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8965 inst.instruction |= inst.operands[3].imm - 1;
8966 }
8967
8968 /* ARM V5 Thumb BLX (argument parse)
8969 BLX <target_addr> which is BLX(1)
8970 BLX <Rm> which is BLX(2)
8971 Unfortunately, there are two different opcodes for this mnemonic.
8972 So, the insns[].value is not used, and the code here zaps values
8973 into inst.instruction.
8974
8975 ??? How to take advantage of the additional two bits of displacement
8976 available in Thumb32 mode? Need new relocation? */
8977
8978 static void
8979 do_t_blx (void)
8980 {
8981 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8982 if (inst.operands[0].isreg)
8983 {
8984 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8985 /* We have a register, so this is BLX(2). */
8986 inst.instruction |= inst.operands[0].reg << 3;
8987 }
8988 else
8989 {
8990 /* No register. This must be BLX(1). */
8991 inst.instruction = 0xf000e800;
8992 #ifdef OBJ_ELF
8993 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8994 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8995 else
8996 #endif
8997 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8998 inst.reloc.pc_rel = 1;
8999 }
9000 }
9001
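/* The relocation chosen below also determines the branch range:
   roughly +/-256 bytes for the 16-bit conditional form (BRANCH9),
   +/-2KB for the 16-bit unconditional form (BRANCH12), +/-1MB for
   B<cond>.W (BRANCH20) and +/-16MB for B.W (BRANCH25).  */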
9002 static void
9003 do_t_branch (void)
9004 {
9005 int opcode;
9006 int cond;
9007
9008 if (current_it_mask)
9009 {
9010 /* Conditional branches inside IT blocks are encoded as unconditional
9011 branches. */
9012 cond = COND_ALWAYS;
9013 /* A branch must be the last instruction in an IT block. */
9014 constraint (current_it_mask != 0x10, BAD_BRANCH);
9015 }
9016 else
9017 cond = inst.cond;
9018
9019 if (cond != COND_ALWAYS)
9020 opcode = T_MNEM_bcond;
9021 else
9022 opcode = inst.instruction;
9023
9024 if (unified_syntax && inst.size_req == 4)
9025 {
9026 inst.instruction = THUMB_OP32(opcode);
9027 if (cond == COND_ALWAYS)
9028 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
9029 else
9030 {
9031 assert (cond != 0xF);
9032 inst.instruction |= cond << 22;
9033 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
9034 }
9035 }
9036 else
9037 {
9038 inst.instruction = THUMB_OP16(opcode);
9039 if (cond == COND_ALWAYS)
9040 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
9041 else
9042 {
9043 inst.instruction |= cond << 8;
9044 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
9045 }
9046 /* Allow section relaxation. */
9047 if (unified_syntax && inst.size_req != 2)
9048 inst.relax = opcode;
9049 }
9050
9051 inst.reloc.pc_rel = 1;
9052 }
9053
9054 static void
9055 do_t_bkpt (void)
9056 {
9057 constraint (inst.cond != COND_ALWAYS,
9058 _("instruction is always unconditional"));
9059 if (inst.operands[0].present)
9060 {
9061 constraint (inst.operands[0].imm > 255,
9062 _("immediate value out of range"));
9063 inst.instruction |= inst.operands[0].imm;
9064 }
9065 }
9066
9067 static void
9068 do_t_branch23 (void)
9069 {
9070 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9071 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
9072 inst.reloc.pc_rel = 1;
9073
9074 /* If the destination of the branch is a defined symbol which does not have
9075 the THUMB_FUNC attribute, then we must be calling a function which has
9076 the (interfacearm) attribute. We look for the Thumb entry point to that
9077 function and change the branch to refer to that function instead. */
9078 if ( inst.reloc.exp.X_op == O_symbol
9079 && inst.reloc.exp.X_add_symbol != NULL
9080 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
9081 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
9082 inst.reloc.exp.X_add_symbol =
9083 find_real_start (inst.reloc.exp.X_add_symbol);
9084 }
9085
9086 static void
9087 do_t_bx (void)
9088 {
9089 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9090 inst.instruction |= inst.operands[0].reg << 3;
9091 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9092 should cause the alignment to be checked once it is known. This is
9093 because BX PC only works if the instruction is word aligned. */
9094 }
9095
9096 static void
9097 do_t_bxj (void)
9098 {
9099 int Rm;
9100
9101 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9102 Rm = inst.operands[0].reg;
9103 reject_bad_reg (Rm);
9104 inst.instruction |= Rm << 16;
9105 }
9106
9107 static void
9108 do_t_clz (void)
9109 {
9110 unsigned Rd;
9111 unsigned Rm;
9112
9113 Rd = inst.operands[0].reg;
9114 Rm = inst.operands[1].reg;
9115
9116 reject_bad_reg (Rd);
9117 reject_bad_reg (Rm);
9118
9119 inst.instruction |= Rd << 8;
9120 inst.instruction |= Rm << 16;
9121 inst.instruction |= Rm;
9122 }
9123
9124 static void
9125 do_t_cps (void)
9126 {
9127 constraint (current_it_mask, BAD_NOT_IT);
9128 inst.instruction |= inst.operands[0].imm;
9129 }
9130
9131 static void
9132 do_t_cpsi (void)
9133 {
9134 constraint (current_it_mask, BAD_NOT_IT);
9135 if (unified_syntax
9136 && (inst.operands[1].present || inst.size_req == 4)
9137 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9138 {
9139 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9140 inst.instruction = 0xf3af8000;
9141 inst.instruction |= imod << 9;
9142 inst.instruction |= inst.operands[0].imm << 5;
9143 if (inst.operands[1].present)
9144 inst.instruction |= 0x100 | inst.operands[1].imm;
9145 }
9146 else
9147 {
9148 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9149 && (inst.operands[0].imm & 4),
9150 _("selected processor does not support 'A' form "
9151 "of this instruction"));
9152 constraint (inst.operands[1].present || inst.size_req == 4,
9153 _("Thumb does not support the 2-argument "
9154 "form of this instruction"));
9155 inst.instruction |= inst.operands[0].imm;
9156 }
9157 }
9158
9159 /* THUMB CPY instruction (argument parse). */
9160
9161 static void
9162 do_t_cpy (void)
9163 {
9164 if (inst.size_req == 4)
9165 {
9166 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9167 inst.instruction |= inst.operands[0].reg << 8;
9168 inst.instruction |= inst.operands[1].reg;
9169 }
9170 else
9171 {
9172 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9173 inst.instruction |= (inst.operands[0].reg & 0x7);
9174 inst.instruction |= inst.operands[1].reg << 3;
9175 }
9176 }
9177
9178 static void
9179 do_t_cbz (void)
9180 {
9181 constraint (current_it_mask, BAD_NOT_IT);
9182 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9183 inst.instruction |= inst.operands[0].reg;
9184 inst.reloc.pc_rel = 1;
9185 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9186 }
9187
9188 static void
9189 do_t_dbg (void)
9190 {
9191 inst.instruction |= inst.operands[0].imm;
9192 }
9193
9194 static void
9195 do_t_div (void)
9196 {
9197 unsigned Rd, Rn, Rm;
9198
9199 Rd = inst.operands[0].reg;
9200 Rn = (inst.operands[1].present
9201 ? inst.operands[1].reg : Rd);
9202 Rm = inst.operands[2].reg;
9203
9204 reject_bad_reg (Rd);
9205 reject_bad_reg (Rn);
9206 reject_bad_reg (Rm);
9207
9208 inst.instruction |= Rd << 8;
9209 inst.instruction |= Rn << 16;
9210 inst.instruction |= Rm;
9211 }
9212
9213 static void
9214 do_t_hint (void)
9215 {
9216 if (unified_syntax && inst.size_req == 4)
9217 inst.instruction = THUMB_OP32 (inst.instruction);
9218 else
9219 inst.instruction = THUMB_OP16 (inst.instruction);
9220 }
9221
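/* In the IT instruction the 4-bit mask holds one bit per additional
   instruction in the block, followed by a terminating 1.  Each slot
   bit equals the LSB of the first condition for a "T" and its
   complement for an "E".  The pattern supplied by the opcode table
   assumes that LSB is 1, so for an even condition code the slot bits
   (but not the terminating 1) must be inverted, which is what the xor
   ladder below does.  */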
9222 static void
9223 do_t_it (void)
9224 {
9225 unsigned int cond = inst.operands[0].imm;
9226
9227 constraint (current_it_mask, BAD_NOT_IT);
9228 current_it_mask = (inst.instruction & 0xf) | 0x10;
9229 current_cc = cond;
9230
9231 /* If the condition is a negative condition, invert the mask. */
9232 if ((cond & 0x1) == 0x0)
9233 {
9234 unsigned int mask = inst.instruction & 0x000f;
9235
9236 if ((mask & 0x7) == 0)
9237 /* no conversion needed */;
9238 else if ((mask & 0x3) == 0)
9239 mask ^= 0x8;
9240 else if ((mask & 0x1) == 0)
9241 mask ^= 0xC;
9242 else
9243 mask ^= 0xE;
9244
9245 inst.instruction &= 0xfff0;
9246 inst.instruction |= mask;
9247 }
9248
9249 inst.instruction |= cond << 4;
9250 }
9251
9252 /* Helper function used for both push/pop and ldm/stm. */
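/* A list containing a single register is turned into a plain LDR/STR
   (with writeback where requested), since the 32-bit LDM/STM
   encodings are UNPREDICTABLE with fewer than two registers; the
   "ffs (mask) - 1" step recovers the register number and moves it
   into the Rt field.  */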
9253 static void
9254 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9255 {
9256 bfd_boolean load;
9257
9258 load = (inst.instruction & (1 << 20)) != 0;
9259
9260 if (mask & (1 << 13))
9261 inst.error = _("SP not allowed in register list");
9262 if (load)
9263 {
9264 if (mask & (1 << 14)
9265 && mask & (1 << 15))
9266 inst.error = _("LR and PC should not both be in register list");
9267
9268 if ((mask & (1 << base)) != 0
9269 && writeback)
9270 as_warn (_("base register should not be in register list "
9271 "when written back"));
9272 }
9273 else
9274 {
9275 if (mask & (1 << 15))
9276 inst.error = _("PC not allowed in register list");
9277
9278 if (mask & (1 << base))
9279 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9280 }
9281
9282 if ((mask & (mask - 1)) == 0)
9283 {
9284 /* Single register transfers implemented as str/ldr. */
9285 if (writeback)
9286 {
9287 if (inst.instruction & (1 << 23))
9288 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9289 else
9290 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9291 }
9292 else
9293 {
9294 if (inst.instruction & (1 << 23))
9295 inst.instruction = 0x00800000; /* ia -> [base] */
9296 else
9297 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9298 }
9299
9300 inst.instruction |= 0xf8400000;
9301 if (load)
9302 inst.instruction |= 0x00100000;
9303
9304 mask = ffs (mask) - 1;
9305 mask <<= 12;
9306 }
9307 else if (writeback)
9308 inst.instruction |= WRITE_BACK;
9309
9310 inst.instruction |= mask;
9311 inst.instruction |= base << 16;
9312 }
9313
9314 static void
9315 do_t_ldmstm (void)
9316 {
9317 /* This really doesn't seem worth it. */
9318 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9319 _("expression too complex"));
9320 constraint (inst.operands[1].writeback,
9321 _("Thumb load/store multiple does not support {reglist}^"));
9322
9323 if (unified_syntax)
9324 {
9325 bfd_boolean narrow;
9326 unsigned mask;
9327
9328 narrow = FALSE;
9329 /* See if we can use a 16-bit instruction. */
9330 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9331 && inst.size_req != 4
9332 && !(inst.operands[1].imm & ~0xff))
9333 {
9334 mask = 1 << inst.operands[0].reg;
9335
9336 if (inst.operands[0].reg <= 7
9337 && (inst.instruction == T_MNEM_stmia
9338 ? inst.operands[0].writeback
9339 : (inst.operands[0].writeback
9340 == !(inst.operands[1].imm & mask))))
9341 {
9342 if (inst.instruction == T_MNEM_stmia
9343 && (inst.operands[1].imm & mask)
9344 && (inst.operands[1].imm & (mask - 1)))
9345 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9346 inst.operands[0].reg);
9347
9348 inst.instruction = THUMB_OP16 (inst.instruction);
9349 inst.instruction |= inst.operands[0].reg << 8;
9350 inst.instruction |= inst.operands[1].imm;
9351 narrow = TRUE;
9352 }
9353 	  else if (inst.operands[0].reg == REG_SP
9354 && inst.operands[0].writeback)
9355 {
9356 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9357 ? T_MNEM_push : T_MNEM_pop);
9358 inst.instruction |= inst.operands[1].imm;
9359 narrow = TRUE;
9360 }
9361 }
9362
9363 if (!narrow)
9364 {
9365 if (inst.instruction < 0xffff)
9366 inst.instruction = THUMB_OP32 (inst.instruction);
9367
9368 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9369 inst.operands[0].writeback);
9370 }
9371 }
9372 else
9373 {
9374 constraint (inst.operands[0].reg > 7
9375 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9376 constraint (inst.instruction != T_MNEM_ldmia
9377 && inst.instruction != T_MNEM_stmia,
9378 _("Thumb-2 instruction only valid in unified syntax"));
9379 if (inst.instruction == T_MNEM_stmia)
9380 {
9381 if (!inst.operands[0].writeback)
9382 as_warn (_("this instruction will write back the base register"));
9383 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9384 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9385 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9386 inst.operands[0].reg);
9387 }
9388 else
9389 {
9390 if (!inst.operands[0].writeback
9391 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9392 as_warn (_("this instruction will write back the base register"));
9393 else if (inst.operands[0].writeback
9394 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9395 as_warn (_("this instruction will not write back the base register"));
9396 }
9397
9398 inst.instruction = THUMB_OP16 (inst.instruction);
9399 inst.instruction |= inst.operands[0].reg << 8;
9400 inst.instruction |= inst.operands[1].imm;
9401 }
9402 }
9403
9404 static void
9405 do_t_ldrex (void)
9406 {
9407 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9408 || inst.operands[1].postind || inst.operands[1].writeback
9409 || inst.operands[1].immisreg || inst.operands[1].shifted
9410 || inst.operands[1].negative,
9411 BAD_ADDR_MODE);
9412
9413 inst.instruction |= inst.operands[0].reg << 12;
9414 inst.instruction |= inst.operands[1].reg << 16;
9415 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9416 }
9417
9418 static void
9419 do_t_ldrexd (void)
9420 {
9421 if (!inst.operands[1].present)
9422 {
9423 constraint (inst.operands[0].reg == REG_LR,
9424 _("r14 not allowed as first register "
9425 "when second register is omitted"));
9426 inst.operands[1].reg = inst.operands[0].reg + 1;
9427 }
9428 constraint (inst.operands[0].reg == inst.operands[1].reg,
9429 BAD_OVERLAP);
9430
9431 inst.instruction |= inst.operands[0].reg << 12;
9432 inst.instruction |= inst.operands[1].reg << 8;
9433 inst.instruction |= inst.operands[2].reg << 16;
9434 }
9435
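/* Only a few addressing forms have 16-bit encodings: [Rn, #imm5]
   (scaled by the access size), [SP, #imm8] for word LDR/STR, the
   PC-relative literal form for LDR, and [Rn, Rm] with all registers
   low.  Anything else, including LDRSB/LDRSH with an immediate
   offset, needs a 32-bit encoding.  */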
9436 static void
9437 do_t_ldst (void)
9438 {
9439 unsigned long opcode;
9440 int Rn;
9441
9442 opcode = inst.instruction;
9443 if (unified_syntax)
9444 {
9445 if (!inst.operands[1].isreg)
9446 {
9447 if (opcode <= 0xffff)
9448 inst.instruction = THUMB_OP32 (opcode);
9449 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9450 return;
9451 }
9452 if (inst.operands[1].isreg
9453 && !inst.operands[1].writeback
9454 && !inst.operands[1].shifted && !inst.operands[1].postind
9455 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9456 && opcode <= 0xffff
9457 && inst.size_req != 4)
9458 {
9459 /* Insn may have a 16-bit form. */
9460 Rn = inst.operands[1].reg;
9461 if (inst.operands[1].immisreg)
9462 {
9463 inst.instruction = THUMB_OP16 (opcode);
9464 /* [Rn, Rik] */
9465 if (Rn <= 7 && inst.operands[1].imm <= 7)
9466 goto op16;
9467 }
9468 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9469 && opcode != T_MNEM_ldrsb)
9470 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9471 || (Rn == REG_SP && opcode == T_MNEM_str))
9472 {
9473 /* [Rn, #const] */
9474 if (Rn > 7)
9475 {
9476 if (Rn == REG_PC)
9477 {
9478 if (inst.reloc.pc_rel)
9479 opcode = T_MNEM_ldr_pc2;
9480 else
9481 opcode = T_MNEM_ldr_pc;
9482 }
9483 else
9484 {
9485 if (opcode == T_MNEM_ldr)
9486 opcode = T_MNEM_ldr_sp;
9487 else
9488 opcode = T_MNEM_str_sp;
9489 }
9490 inst.instruction = inst.operands[0].reg << 8;
9491 }
9492 else
9493 {
9494 inst.instruction = inst.operands[0].reg;
9495 inst.instruction |= inst.operands[1].reg << 3;
9496 }
9497 inst.instruction |= THUMB_OP16 (opcode);
9498 if (inst.size_req == 2)
9499 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9500 else
9501 inst.relax = opcode;
9502 return;
9503 }
9504 }
9505 /* Definitely a 32-bit variant. */
9506 inst.instruction = THUMB_OP32 (opcode);
9507 inst.instruction |= inst.operands[0].reg << 12;
9508 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9509 return;
9510 }
9511
9512 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9513
9514 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9515 {
9516 /* Only [Rn,Rm] is acceptable. */
9517 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9518 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9519 || inst.operands[1].postind || inst.operands[1].shifted
9520 || inst.operands[1].negative,
9521 _("Thumb does not support this addressing mode"));
9522 inst.instruction = THUMB_OP16 (inst.instruction);
9523 goto op16;
9524 }
9525
9526 inst.instruction = THUMB_OP16 (inst.instruction);
9527 if (!inst.operands[1].isreg)
9528 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9529 return;
9530
9531 constraint (!inst.operands[1].preind
9532 || inst.operands[1].shifted
9533 || inst.operands[1].writeback,
9534 _("Thumb does not support this addressing mode"));
9535 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9536 {
9537 constraint (inst.instruction & 0x0600,
9538 _("byte or halfword not valid for base register"));
9539 constraint (inst.operands[1].reg == REG_PC
9540 && !(inst.instruction & THUMB_LOAD_BIT),
9541 _("r15 based store not allowed"));
9542 constraint (inst.operands[1].immisreg,
9543 _("invalid base register for register offset"));
9544
9545 if (inst.operands[1].reg == REG_PC)
9546 inst.instruction = T_OPCODE_LDR_PC;
9547 else if (inst.instruction & THUMB_LOAD_BIT)
9548 inst.instruction = T_OPCODE_LDR_SP;
9549 else
9550 inst.instruction = T_OPCODE_STR_SP;
9551
9552 inst.instruction |= inst.operands[0].reg << 8;
9553 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9554 return;
9555 }
9556
9557 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9558 if (!inst.operands[1].immisreg)
9559 {
9560 /* Immediate offset. */
9561 inst.instruction |= inst.operands[0].reg;
9562 inst.instruction |= inst.operands[1].reg << 3;
9563 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9564 return;
9565 }
9566
9567 /* Register offset. */
9568 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9569 constraint (inst.operands[1].negative,
9570 _("Thumb does not support this addressing mode"));
9571
9572 op16:
9573 switch (inst.instruction)
9574 {
9575 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9576 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9577 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9578 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9579 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9580 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9581 case 0x5600 /* ldrsb */:
9582 case 0x5e00 /* ldrsh */: break;
9583 default: abort ();
9584 }
9585
9586 inst.instruction |= inst.operands[0].reg;
9587 inst.instruction |= inst.operands[1].reg << 3;
9588 inst.instruction |= inst.operands[1].imm << 6;
9589 }
9590
9591 static void
9592 do_t_ldstd (void)
9593 {
9594 if (!inst.operands[1].present)
9595 {
9596 inst.operands[1].reg = inst.operands[0].reg + 1;
9597 constraint (inst.operands[0].reg == REG_LR,
9598 _("r14 not allowed here"));
9599 }
9600 inst.instruction |= inst.operands[0].reg << 12;
9601 inst.instruction |= inst.operands[1].reg << 8;
9602 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9603 }
9604
9605 static void
9606 do_t_ldstt (void)
9607 {
9608 inst.instruction |= inst.operands[0].reg << 12;
9609 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9610 }
9611
9612 static void
9613 do_t_mla (void)
9614 {
9615 unsigned Rd, Rn, Rm, Ra;
9616
9617 Rd = inst.operands[0].reg;
9618 Rn = inst.operands[1].reg;
9619 Rm = inst.operands[2].reg;
9620 Ra = inst.operands[3].reg;
9621
9622 reject_bad_reg (Rd);
9623 reject_bad_reg (Rn);
9624 reject_bad_reg (Rm);
9625 reject_bad_reg (Ra);
9626
9627 inst.instruction |= Rd << 8;
9628 inst.instruction |= Rn << 16;
9629 inst.instruction |= Rm;
9630 inst.instruction |= Ra << 12;
9631 }
9632
9633 static void
9634 do_t_mlal (void)
9635 {
9636 unsigned RdLo, RdHi, Rn, Rm;
9637
9638 RdLo = inst.operands[0].reg;
9639 RdHi = inst.operands[1].reg;
9640 Rn = inst.operands[2].reg;
9641 Rm = inst.operands[3].reg;
9642
9643 reject_bad_reg (RdLo);
9644 reject_bad_reg (RdHi);
9645 reject_bad_reg (Rn);
9646 reject_bad_reg (Rm);
9647
9648 inst.instruction |= RdLo << 12;
9649 inst.instruction |= RdHi << 8;
9650 inst.instruction |= Rn << 16;
9651 inst.instruction |= Rm;
9652 }
9653
9654 static void
9655 do_t_mov_cmp (void)
9656 {
9657 unsigned Rn, Rm;
9658
9659 Rn = inst.operands[0].reg;
9660 Rm = inst.operands[1].reg;
9661
9662 if (unified_syntax)
9663 {
9664 int r0off = (inst.instruction == T_MNEM_mov
9665 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9666 unsigned long opcode;
9667 bfd_boolean narrow;
9668 bfd_boolean low_regs;
9669
9670 low_regs = (Rn <= 7 && Rm <= 7);
9671 opcode = inst.instruction;
9672 if (current_it_mask)
9673 narrow = opcode != T_MNEM_movs;
9674 else
9675 narrow = opcode != T_MNEM_movs || low_regs;
9676 if (inst.size_req == 4
9677 || inst.operands[1].shifted)
9678 narrow = FALSE;
9679
9680 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9681 if (opcode == T_MNEM_movs && inst.operands[1].isreg
9682 && !inst.operands[1].shifted
9683 && Rn == REG_PC
9684 && Rm == REG_LR)
9685 {
9686 inst.instruction = T2_SUBS_PC_LR;
9687 return;
9688 }
9689
9690 if (opcode == T_MNEM_cmp)
9691 {
9692 constraint (Rn == REG_PC, BAD_PC);
9693 reject_bad_reg (Rm);
9694 }
9695 else if (opcode == T_MNEM_mov
9696 || opcode == T_MNEM_movs)
9697 {
9698 if (inst.operands[1].isreg)
9699 {
9700 if (opcode == T_MNEM_movs)
9701 {
9702 reject_bad_reg (Rn);
9703 reject_bad_reg (Rm);
9704 }
9705 else if ((Rn == REG_SP || Rn == REG_PC)
9706 && (Rm == REG_SP || Rm == REG_PC))
9707 reject_bad_reg (Rm);
9708 }
9709 else
9710 reject_bad_reg (Rn);
9711 }
9712
9713 if (!inst.operands[1].isreg)
9714 {
9715 /* Immediate operand. */
9716 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9717 narrow = 0;
9718 if (low_regs && narrow)
9719 {
9720 inst.instruction = THUMB_OP16 (opcode);
9721 inst.instruction |= Rn << 8;
9722 if (inst.size_req == 2)
9723 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9724 else
9725 inst.relax = opcode;
9726 }
9727 else
9728 {
9729 inst.instruction = THUMB_OP32 (inst.instruction);
9730 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9731 inst.instruction |= Rn << r0off;
9732 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9733 }
9734 }
9735 else if (inst.operands[1].shifted && inst.operands[1].immisreg
9736 && (inst.instruction == T_MNEM_mov
9737 || inst.instruction == T_MNEM_movs))
9738 {
9739 /* Register shifts are encoded as separate shift instructions. */
9740 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
9741
9742 if (current_it_mask)
9743 narrow = !flags;
9744 else
9745 narrow = flags;
9746
9747 if (inst.size_req == 4)
9748 narrow = FALSE;
9749
9750 if (!low_regs || inst.operands[1].imm > 7)
9751 narrow = FALSE;
9752
9753 if (Rn != Rm)
9754 narrow = FALSE;
9755
9756 switch (inst.operands[1].shift_kind)
9757 {
9758 case SHIFT_LSL:
9759 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
9760 break;
9761 case SHIFT_ASR:
9762 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
9763 break;
9764 case SHIFT_LSR:
9765 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
9766 break;
9767 case SHIFT_ROR:
9768 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
9769 break;
9770 default:
9771 abort ();
9772 }
9773
9774 inst.instruction = opcode;
9775 if (narrow)
9776 {
9777 inst.instruction |= Rn;
9778 inst.instruction |= inst.operands[1].imm << 3;
9779 }
9780 else
9781 {
9782 if (flags)
9783 inst.instruction |= CONDS_BIT;
9784
9785 inst.instruction |= Rn << 8;
9786 inst.instruction |= Rm << 16;
9787 inst.instruction |= inst.operands[1].imm;
9788 }
9789 }
9790 else if (!narrow)
9791 {
9792 /* Some mov with immediate shift have narrow variants.
9793 Register shifts are handled above. */
9794 if (low_regs && inst.operands[1].shifted
9795 && (inst.instruction == T_MNEM_mov
9796 || inst.instruction == T_MNEM_movs))
9797 {
9798 if (current_it_mask)
9799 narrow = (inst.instruction == T_MNEM_mov);
9800 else
9801 narrow = (inst.instruction == T_MNEM_movs);
9802 }
9803
9804 if (narrow)
9805 {
9806 switch (inst.operands[1].shift_kind)
9807 {
9808 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9809 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9810 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9811 default: narrow = FALSE; break;
9812 }
9813 }
9814
9815 if (narrow)
9816 {
9817 inst.instruction |= Rn;
9818 inst.instruction |= Rm << 3;
9819 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9820 }
9821 else
9822 {
9823 inst.instruction = THUMB_OP32 (inst.instruction);
9824 inst.instruction |= Rn << r0off;
9825 encode_thumb32_shifted_operand (1);
9826 }
9827 }
9828 else
9829 switch (inst.instruction)
9830 {
9831 case T_MNEM_mov:
9832 inst.instruction = T_OPCODE_MOV_HR;
9833 inst.instruction |= (Rn & 0x8) << 4;
9834 inst.instruction |= (Rn & 0x7);
9835 inst.instruction |= Rm << 3;
9836 break;
9837
9838 case T_MNEM_movs:
9839 /* We know we have low registers at this point.
9840 Generate ADD Rd, Rs, #0. */
9841 inst.instruction = T_OPCODE_ADD_I3;
9842 inst.instruction |= Rn;
9843 inst.instruction |= Rm << 3;
9844 break;
9845
9846 case T_MNEM_cmp:
9847 if (low_regs)
9848 {
9849 inst.instruction = T_OPCODE_CMP_LR;
9850 inst.instruction |= Rn;
9851 inst.instruction |= Rm << 3;
9852 }
9853 else
9854 {
9855 inst.instruction = T_OPCODE_CMP_HR;
9856 inst.instruction |= (Rn & 0x8) << 4;
9857 inst.instruction |= (Rn & 0x7);
9858 inst.instruction |= Rm << 3;
9859 }
9860 break;
9861 }
9862 return;
9863 }
9864
9865 inst.instruction = THUMB_OP16 (inst.instruction);
9866 if (inst.operands[1].isreg)
9867 {
9868 if (Rn < 8 && Rm < 8)
9869 {
9870 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9871 since a MOV instruction produces unpredictable results. */
9872 if (inst.instruction == T_OPCODE_MOV_I8)
9873 inst.instruction = T_OPCODE_ADD_I3;
9874 else
9875 inst.instruction = T_OPCODE_CMP_LR;
9876
9877 inst.instruction |= Rn;
9878 inst.instruction |= Rm << 3;
9879 }
9880 else
9881 {
9882 if (inst.instruction == T_OPCODE_MOV_I8)
9883 inst.instruction = T_OPCODE_MOV_HR;
9884 else
9885 inst.instruction = T_OPCODE_CMP_HR;
9886 do_t_cpy ();
9887 }
9888 }
9889 else
9890 {
9891 constraint (Rn > 7,
9892 _("only lo regs allowed with immediate"));
9893 inst.instruction |= Rn << 8;
9894 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9895 }
9896 }
9897
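/* MOVW/MOVT split their 16-bit immediate as imm4:i:imm3:imm8, placed
   in bits 16-19, bit 26, bits 12-14 and bits 0-7 respectively.  For
   example #0x1234 gives imm4 = 1, i = 0, imm3 = 2, imm8 = 0x34, which
   is what the shifts below compute when no relocation is pending.  */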
9898 static void
9899 do_t_mov16 (void)
9900 {
9901 unsigned Rd;
9902 bfd_vma imm;
9903 bfd_boolean top;
9904
9905 top = (inst.instruction & 0x00800000) != 0;
9906 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9907 {
9908       constraint (top, _(":lower16: not allowed in this instruction"));
9909 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9910 }
9911 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9912 {
9913       constraint (!top, _(":upper16: not allowed in this instruction"));
9914 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9915 }
9916
9917 Rd = inst.operands[0].reg;
9918 reject_bad_reg (Rd);
9919
9920 inst.instruction |= Rd << 8;
9921 if (inst.reloc.type == BFD_RELOC_UNUSED)
9922 {
9923 imm = inst.reloc.exp.X_add_number;
9924 inst.instruction |= (imm & 0xf000) << 4;
9925 inst.instruction |= (imm & 0x0800) << 15;
9926 inst.instruction |= (imm & 0x0700) << 4;
9927 inst.instruction |= (imm & 0x00ff);
9928 }
9929 }
9930
9931 static void
9932 do_t_mvn_tst (void)
9933 {
9934 unsigned Rn, Rm;
9935
9936 Rn = inst.operands[0].reg;
9937 Rm = inst.operands[1].reg;
9938
9939 if (inst.instruction == T_MNEM_cmp
9940 || inst.instruction == T_MNEM_cmn)
9941 constraint (Rn == REG_PC, BAD_PC);
9942 else
9943 reject_bad_reg (Rn);
9944 reject_bad_reg (Rm);
9945
9946 if (unified_syntax)
9947 {
9948 int r0off = (inst.instruction == T_MNEM_mvn
9949 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9950 bfd_boolean narrow;
9951
9952 if (inst.size_req == 4
9953 || inst.instruction > 0xffff
9954 || inst.operands[1].shifted
9955 || Rn > 7 || Rm > 7)
9956 narrow = FALSE;
9957 else if (inst.instruction == T_MNEM_cmn)
9958 narrow = TRUE;
9959 else if (THUMB_SETS_FLAGS (inst.instruction))
9960 narrow = (current_it_mask == 0);
9961 else
9962 narrow = (current_it_mask != 0);
9963
9964 if (!inst.operands[1].isreg)
9965 {
9966 /* For an immediate, we always generate a 32-bit opcode;
9967 section relaxation will shrink it later if possible. */
9968 if (inst.instruction < 0xffff)
9969 inst.instruction = THUMB_OP32 (inst.instruction);
9970 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9971 inst.instruction |= Rn << r0off;
9972 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9973 }
9974 else
9975 {
9976 /* See if we can do this with a 16-bit instruction. */
9977 if (narrow)
9978 {
9979 inst.instruction = THUMB_OP16 (inst.instruction);
9980 inst.instruction |= Rn;
9981 inst.instruction |= Rm << 3;
9982 }
9983 else
9984 {
9985 constraint (inst.operands[1].shifted
9986 && inst.operands[1].immisreg,
9987 _("shift must be constant"));
9988 if (inst.instruction < 0xffff)
9989 inst.instruction = THUMB_OP32 (inst.instruction);
9990 inst.instruction |= Rn << r0off;
9991 encode_thumb32_shifted_operand (1);
9992 }
9993 }
9994 }
9995 else
9996 {
9997 constraint (inst.instruction > 0xffff
9998 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9999 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
10000 _("unshifted register required"));
10001 constraint (Rn > 7 || Rm > 7,
10002 BAD_HIREG);
10003
10004 inst.instruction = THUMB_OP16 (inst.instruction);
10005 inst.instruction |= Rn;
10006 inst.instruction |= Rm << 3;
10007 }
10008 }
10009
10010 static void
10011 do_t_mrs (void)
10012 {
10013 unsigned Rd;
10014 int flags;
10015
10016 if (do_vfp_nsyn_mrs () == SUCCESS)
10017 return;
10018
10019 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
10020 if (flags == 0)
10021 {
10022 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10023 _("selected processor does not support "
10024 "requested special purpose register"));
10025 }
10026 else
10027 {
10028 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10029 _("selected processor does not support "
10030 "requested special purpose register"));
10031 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10032 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
10033 _("'CPSR' or 'SPSR' expected"));
10034 }
10035
10036 Rd = inst.operands[0].reg;
10037 reject_bad_reg (Rd);
10038
10039 inst.instruction |= Rd << 8;
10040 inst.instruction |= (flags & SPSR_BIT) >> 2;
10041 inst.instruction |= inst.operands[1].imm & 0xff;
10042 }
10043
10044 static void
10045 do_t_msr (void)
10046 {
10047 int flags;
10048 unsigned Rn;
10049
10050 if (do_vfp_nsyn_msr () == SUCCESS)
10051 return;
10052
10053 constraint (!inst.operands[1].isreg,
10054 _("Thumb encoding does not support an immediate here"));
10055 flags = inst.operands[0].imm;
10056 if (flags & ~0xff)
10057 {
10058 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10059 _("selected processor does not support "
10060 "requested special purpose register"));
10061 }
10062 else
10063 {
10064 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10065 _("selected processor does not support "
10066 "requested special purpose register"));
10067 flags |= PSR_f;
10068 }
10069
10070 Rn = inst.operands[1].reg;
10071 reject_bad_reg (Rn);
10072
10073 inst.instruction |= (flags & SPSR_BIT) >> 2;
10074 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
10075 inst.instruction |= (flags & 0xff);
10076 inst.instruction |= Rn << 16;
10077 }
10078
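/* The 16-bit MUL encoding has only two register fields (Rdm and Rn),
   so a narrow encoding is possible only when the destination equals
   one of the sources; whichever source is left over goes into the Rn
   field below.  */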
10079 static void
10080 do_t_mul (void)
10081 {
10082 bfd_boolean narrow;
10083 unsigned Rd, Rn, Rm;
10084
10085 if (!inst.operands[2].present)
10086 inst.operands[2].reg = inst.operands[0].reg;
10087
10088 Rd = inst.operands[0].reg;
10089 Rn = inst.operands[1].reg;
10090 Rm = inst.operands[2].reg;
10091
10092 if (unified_syntax)
10093 {
10094 if (inst.size_req == 4
10095 || (Rd != Rn
10096 && Rd != Rm)
10097 || Rn > 7
10098 || Rm > 7)
10099 narrow = FALSE;
10100 else if (inst.instruction == T_MNEM_muls)
10101 narrow = (current_it_mask == 0);
10102 else
10103 narrow = (current_it_mask != 0);
10104 }
10105 else
10106 {
10107 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
10108 constraint (Rn > 7 || Rm > 7,
10109 BAD_HIREG);
10110 narrow = TRUE;
10111 }
10112
10113 if (narrow)
10114 {
10115 /* 16-bit MULS/Conditional MUL. */
10116 inst.instruction = THUMB_OP16 (inst.instruction);
10117 inst.instruction |= Rd;
10118
10119 if (Rd == Rn)
10120 inst.instruction |= Rm << 3;
10121 else if (Rd == Rm)
10122 inst.instruction |= Rn << 3;
10123 else
10124 constraint (1, _("dest must overlap one source register"));
10125 }
10126 else
10127 {
10128       constraint (inst.instruction != T_MNEM_mul,
10129 _("Thumb-2 MUL must not set flags"));
10130 /* 32-bit MUL. */
10131 inst.instruction = THUMB_OP32 (inst.instruction);
10132 inst.instruction |= Rd << 8;
10133 inst.instruction |= Rn << 16;
10134 inst.instruction |= Rm << 0;
10135
10136 reject_bad_reg (Rd);
10137 reject_bad_reg (Rn);
10138 reject_bad_reg (Rm);
10139 }
10140 }
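
/* Worked example (illustrative, following the narrow/wide selection above):
   in unified syntax, outside an IT block, "muls r0, r1, r0" meets the
   low-register and overlap requirements and assembles to the 16-bit MULS,
   while "mul r0, r1, r2" (destination distinct from both sources) forces the
   32-bit Thumb-2 MUL.  The narrow MULS is only chosen outside an IT block,
   and the narrow non-flag-setting MUL only inside one.  */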
10141
10142 static void
10143 do_t_mull (void)
10144 {
10145 unsigned RdLo, RdHi, Rn, Rm;
10146
10147 RdLo = inst.operands[0].reg;
10148 RdHi = inst.operands[1].reg;
10149 Rn = inst.operands[2].reg;
10150 Rm = inst.operands[3].reg;
10151
10152 reject_bad_reg (RdLo);
10153 reject_bad_reg (RdHi);
10154 reject_bad_reg (Rn);
10155 reject_bad_reg (Rm);
10156
10157 inst.instruction |= RdLo << 12;
10158 inst.instruction |= RdHi << 8;
10159 inst.instruction |= Rn << 16;
10160 inst.instruction |= Rm;
10161
10162 if (RdLo == RdHi)
10163 as_tsktsk (_("rdhi and rdlo must be different"));
10164 }
10165
10166 static void
10167 do_t_nop (void)
10168 {
10169 if (unified_syntax)
10170 {
10171 if (inst.size_req == 4 || inst.operands[0].imm > 15)
10172 {
10173 inst.instruction = THUMB_OP32 (inst.instruction);
10174 inst.instruction |= inst.operands[0].imm;
10175 }
10176 else
10177 {
10178 /* PR 9722: Check for Thumb-2 availability before
10179 generating a Thumb-2 NOP instruction. */
10180 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
10181 {
10182 inst.instruction = THUMB_OP16 (inst.instruction);
10183 inst.instruction |= inst.operands[0].imm << 4;
10184 }
10185 else
10186 inst.instruction = 0x46c0;
10187 }
10188 }
10189 else
10190 {
10191 constraint (inst.operands[0].present,
10192 _("Thumb does not support NOP with hints"));
10193 inst.instruction = 0x46c0;
10194 }
10195 }
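
/* Illustrative note: on a core without Thumb-2 (arm_arch_t2 absent), a plain
   "nop" in unified syntax falls back to 0x46c0, the traditional "mov r8, r8"
   Thumb NOP, rather than a NOP-hint encoding the core could not execute;
   "nop.w" or a hint value above 15 always takes the 32-bit path above.  */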
10196
10197 static void
10198 do_t_neg (void)
10199 {
10200 if (unified_syntax)
10201 {
10202 bfd_boolean narrow;
10203
10204 if (THUMB_SETS_FLAGS (inst.instruction))
10205 narrow = (current_it_mask == 0);
10206 else
10207 narrow = (current_it_mask != 0);
10208 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10209 narrow = FALSE;
10210 if (inst.size_req == 4)
10211 narrow = FALSE;
10212
10213 if (!narrow)
10214 {
10215 inst.instruction = THUMB_OP32 (inst.instruction);
10216 inst.instruction |= inst.operands[0].reg << 8;
10217 inst.instruction |= inst.operands[1].reg << 16;
10218 }
10219 else
10220 {
10221 inst.instruction = THUMB_OP16 (inst.instruction);
10222 inst.instruction |= inst.operands[0].reg;
10223 inst.instruction |= inst.operands[1].reg << 3;
10224 }
10225 }
10226 else
10227 {
10228 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
10229 BAD_HIREG);
10230 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10231
10232 inst.instruction = THUMB_OP16 (inst.instruction);
10233 inst.instruction |= inst.operands[0].reg;
10234 inst.instruction |= inst.operands[1].reg << 3;
10235 }
10236 }
10237
10238 static void
10239 do_t_orn (void)
10240 {
10241 unsigned Rd, Rn;
10242
10243 Rd = inst.operands[0].reg;
10244 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
10245
10246 reject_bad_reg (Rd);
10247 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10248 reject_bad_reg (Rn);
10249
10250 inst.instruction |= Rd << 8;
10251 inst.instruction |= Rn << 16;
10252
10253 if (!inst.operands[2].isreg)
10254 {
10255 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10256 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10257 }
10258 else
10259 {
10260 unsigned Rm;
10261
10262 Rm = inst.operands[2].reg;
10263 reject_bad_reg (Rm);
10264
10265 constraint (inst.operands[2].shifted
10266 && inst.operands[2].immisreg,
10267 _("shift must be constant"));
10268 encode_thumb32_shifted_operand (2);
10269 }
10270 }
10271
10272 static void
10273 do_t_pkhbt (void)
10274 {
10275 unsigned Rd, Rn, Rm;
10276
10277 Rd = inst.operands[0].reg;
10278 Rn = inst.operands[1].reg;
10279 Rm = inst.operands[2].reg;
10280
10281 reject_bad_reg (Rd);
10282 reject_bad_reg (Rn);
10283 reject_bad_reg (Rm);
10284
10285 inst.instruction |= Rd << 8;
10286 inst.instruction |= Rn << 16;
10287 inst.instruction |= Rm;
10288 if (inst.operands[3].present)
10289 {
10290 unsigned int val = inst.reloc.exp.X_add_number;
10291 constraint (inst.reloc.exp.X_op != O_constant,
10292 _("expression too complex"));
10293 inst.instruction |= (val & 0x1c) << 10;
10294 inst.instruction |= (val & 0x03) << 6;
10295 }
10296 }
10297
10298 static void
10299 do_t_pkhtb (void)
10300 {
10301 if (!inst.operands[3].present)
10302 inst.instruction &= ~0x00000020;
10303 do_t_pkhbt ();
10304 }
10305
10306 static void
10307 do_t_pld (void)
10308 {
10309 if (inst.operands[0].immisreg)
10310 reject_bad_reg (inst.operands[0].imm);
10311
10312 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
10313 }
10314
10315 static void
10316 do_t_push_pop (void)
10317 {
10318 unsigned mask;
10319
10320 constraint (inst.operands[0].writeback,
10321 _("push/pop do not support {reglist}^"));
10322 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10323 _("expression too complex"));
10324
10325 mask = inst.operands[0].imm;
10326 if ((mask & ~0xff) == 0)
10327 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
10328 else if ((inst.instruction == T_MNEM_push
10329 && (mask & ~0xff) == 1 << REG_LR)
10330 || (inst.instruction == T_MNEM_pop
10331 && (mask & ~0xff) == 1 << REG_PC))
10332 {
10333 inst.instruction = THUMB_OP16 (inst.instruction);
10334 inst.instruction |= THUMB_PP_PC_LR;
10335 inst.instruction |= mask & 0xff;
10336 }
10337 else if (unified_syntax)
10338 {
10339 inst.instruction = THUMB_OP32 (inst.instruction);
10340 encode_thumb2_ldmstm (13, mask, TRUE);
10341 }
10342 else
10343 {
10344 inst.error = _("invalid register list to push/pop instruction");
10345 return;
10346 }
10347 }
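
/* Worked examples of the mask checks above: "push {r4-r7}" (mask 0x00f0)
   uses the plain 16-bit encoding; "push {r4-r7, lr}" (mask 0x40f0) matches
   the LR special case and sets the PC/LR bit of the 16-bit encoding;
   "push {r4, r8}" (mask 0x0110) needs the 32-bit Thumb-2 form, which is
   only accepted in unified syntax.  */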
10348
10349 static void
10350 do_t_rbit (void)
10351 {
10352 unsigned Rd, Rm;
10353
10354 Rd = inst.operands[0].reg;
10355 Rm = inst.operands[1].reg;
10356
10357 reject_bad_reg (Rd);
10358 reject_bad_reg (Rm);
10359
10360 inst.instruction |= Rd << 8;
10361 inst.instruction |= Rm << 16;
10362 inst.instruction |= Rm;
10363 }
10364
10365 static void
10366 do_t_rev (void)
10367 {
10368 unsigned Rd, Rm;
10369
10370 Rd = inst.operands[0].reg;
10371 Rm = inst.operands[1].reg;
10372
10373 reject_bad_reg (Rd);
10374 reject_bad_reg (Rm);
10375
10376 if (Rd <= 7 && Rm <= 7
10377 && inst.size_req != 4)
10378 {
10379 inst.instruction = THUMB_OP16 (inst.instruction);
10380 inst.instruction |= Rd;
10381 inst.instruction |= Rm << 3;
10382 }
10383 else if (unified_syntax)
10384 {
10385 inst.instruction = THUMB_OP32 (inst.instruction);
10386 inst.instruction |= Rd << 8;
10387 inst.instruction |= Rm << 16;
10388 inst.instruction |= Rm;
10389 }
10390 else
10391 inst.error = BAD_HIREG;
10392 }
10393
10394 static void
10395 do_t_rrx (void)
10396 {
10397 unsigned Rd, Rm;
10398
10399 Rd = inst.operands[0].reg;
10400 Rm = inst.operands[1].reg;
10401
10402 reject_bad_reg (Rd);
10403 reject_bad_reg (Rm);
10404
10405 inst.instruction |= Rd << 8;
10406 inst.instruction |= Rm;
10407 }
10408
10409 static void
10410 do_t_rsb (void)
10411 {
10412 unsigned Rd, Rs;
10413
10414 Rd = inst.operands[0].reg;
10415 Rs = (inst.operands[1].present
10416 ? inst.operands[1].reg /* Rd, Rs, foo */
10417 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10418
10419 reject_bad_reg (Rd);
10420 reject_bad_reg (Rs);
10421 if (inst.operands[2].isreg)
10422 reject_bad_reg (inst.operands[2].reg);
10423
10424 inst.instruction |= Rd << 8;
10425 inst.instruction |= Rs << 16;
10426 if (!inst.operands[2].isreg)
10427 {
10428 bfd_boolean narrow;
10429
10430 if ((inst.instruction & 0x00100000) != 0)
10431 narrow = (current_it_mask == 0);
10432 else
10433 narrow = (current_it_mask != 0);
10434
10435 if (Rd > 7 || Rs > 7)
10436 narrow = FALSE;
10437
10438 if (inst.size_req == 4 || !unified_syntax)
10439 narrow = FALSE;
10440
10441 if (inst.reloc.exp.X_op != O_constant
10442 || inst.reloc.exp.X_add_number != 0)
10443 narrow = FALSE;
10444
10445 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10446 relaxation, but it doesn't seem worth the hassle. */
10447 if (narrow)
10448 {
10449 inst.reloc.type = BFD_RELOC_UNUSED;
10450 inst.instruction = THUMB_OP16 (T_MNEM_negs);
10451 inst.instruction |= Rs << 3;
10452 inst.instruction |= Rd;
10453 }
10454 else
10455 {
10456 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10457 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10458 }
10459 }
10460 else
10461 encode_thumb32_shifted_operand (2);
10462 }
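
/* Illustrative examples of the immediate case above: outside an IT block,
   "rsbs r0, r1, #0" meets every narrowing condition and is rewritten as the
   16-bit NEGS r0, r1, while "rsb r0, r1, #0" (not flag-setting) or any
   non-zero immediate stays in the 32-bit form with a T32 immediate fixup.  */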
10463
10464 static void
10465 do_t_setend (void)
10466 {
10467 constraint (current_it_mask, BAD_NOT_IT);
10468 if (inst.operands[0].imm)
10469 inst.instruction |= 0x8;
10470 }
10471
10472 static void
10473 do_t_shift (void)
10474 {
10475 if (!inst.operands[1].present)
10476 inst.operands[1].reg = inst.operands[0].reg;
10477
10478 if (unified_syntax)
10479 {
10480 bfd_boolean narrow;
10481 int shift_kind;
10482
10483 switch (inst.instruction)
10484 {
10485 case T_MNEM_asr:
10486 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10487 case T_MNEM_lsl:
10488 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10489 case T_MNEM_lsr:
10490 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10491 case T_MNEM_ror:
10492 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10493 default: abort ();
10494 }
10495
10496 if (THUMB_SETS_FLAGS (inst.instruction))
10497 narrow = (current_it_mask == 0);
10498 else
10499 narrow = (current_it_mask != 0);
10500 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10501 narrow = FALSE;
10502 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10503 narrow = FALSE;
10504 if (inst.operands[2].isreg
10505 && (inst.operands[1].reg != inst.operands[0].reg
10506 || inst.operands[2].reg > 7))
10507 narrow = FALSE;
10508 if (inst.size_req == 4)
10509 narrow = FALSE;
10510
10511 reject_bad_reg (inst.operands[0].reg);
10512 reject_bad_reg (inst.operands[1].reg);
10513
10514 if (!narrow)
10515 {
10516 if (inst.operands[2].isreg)
10517 {
10518 reject_bad_reg (inst.operands[2].reg);
10519 inst.instruction = THUMB_OP32 (inst.instruction);
10520 inst.instruction |= inst.operands[0].reg << 8;
10521 inst.instruction |= inst.operands[1].reg << 16;
10522 inst.instruction |= inst.operands[2].reg;
10523 }
10524 else
10525 {
10526 inst.operands[1].shifted = 1;
10527 inst.operands[1].shift_kind = shift_kind;
10528 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10529 ? T_MNEM_movs : T_MNEM_mov);
10530 inst.instruction |= inst.operands[0].reg << 8;
10531 encode_thumb32_shifted_operand (1);
10532 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10533 inst.reloc.type = BFD_RELOC_UNUSED;
10534 }
10535 }
10536 else
10537 {
10538 if (inst.operands[2].isreg)
10539 {
10540 switch (shift_kind)
10541 {
10542 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10543 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10544 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10545 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10546 default: abort ();
10547 }
10548
10549 inst.instruction |= inst.operands[0].reg;
10550 inst.instruction |= inst.operands[2].reg << 3;
10551 }
10552 else
10553 {
10554 switch (shift_kind)
10555 {
10556 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10557 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10558 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10559 default: abort ();
10560 }
10561 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10562 inst.instruction |= inst.operands[0].reg;
10563 inst.instruction |= inst.operands[1].reg << 3;
10564 }
10565 }
10566 }
10567 else
10568 {
10569 constraint (inst.operands[0].reg > 7
10570 || inst.operands[1].reg > 7, BAD_HIREG);
10571 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10572
10573 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10574 {
10575 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10576 constraint (inst.operands[0].reg != inst.operands[1].reg,
10577 _("source1 and dest must be same register"));
10578
10579 switch (inst.instruction)
10580 {
10581 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10582 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10583 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10584 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10585 default: abort ();
10586 }
10587
10588 inst.instruction |= inst.operands[0].reg;
10589 inst.instruction |= inst.operands[2].reg << 3;
10590 }
10591 else
10592 {
10593 switch (inst.instruction)
10594 {
10595 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10596 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10597 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10598 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10599 default: abort ();
10600 }
10601 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10602 inst.instruction |= inst.operands[0].reg;
10603 inst.instruction |= inst.operands[1].reg << 3;
10604 }
10605 }
10606 }
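
/* Worked example of the selection above, in unified syntax outside an IT
   block: "lsls r2, r3, #1" stays narrow and gets the 16-bit immediate-shift
   encoding with a BFD_RELOC_ARM_THUMB_SHIFT fixup, whereas "lsl r2, r3, #1"
   (not flag-setting, so it cannot be narrow here) is rewritten as a 32-bit
   MOV with an LSL-shifted operand via T_MNEM_mov.  */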
10607
10608 static void
10609 do_t_simd (void)
10610 {
10611 unsigned Rd, Rn, Rm;
10612
10613 Rd = inst.operands[0].reg;
10614 Rn = inst.operands[1].reg;
10615 Rm = inst.operands[2].reg;
10616
10617 reject_bad_reg (Rd);
10618 reject_bad_reg (Rn);
10619 reject_bad_reg (Rm);
10620
10621 inst.instruction |= Rd << 8;
10622 inst.instruction |= Rn << 16;
10623 inst.instruction |= Rm;
10624 }
10625
10626 static void
10627 do_t_smc (void)
10628 {
10629 unsigned int value = inst.reloc.exp.X_add_number;
10630 constraint (inst.reloc.exp.X_op != O_constant,
10631 _("expression too complex"));
10632 inst.reloc.type = BFD_RELOC_UNUSED;
10633 inst.instruction |= (value & 0xf000) >> 12;
10634 inst.instruction |= (value & 0x0ff0);
10635 inst.instruction |= (value & 0x000f) << 16;
10636 }
10637
10638 static void
10639 do_t_ssat (void)
10640 {
10641 unsigned Rd, Rn;
10642
10643 Rd = inst.operands[0].reg;
10644 Rn = inst.operands[2].reg;
10645
10646 reject_bad_reg (Rd);
10647 reject_bad_reg (Rn);
10648
10649 inst.instruction |= Rd << 8;
10650 inst.instruction |= inst.operands[1].imm - 1;
10651 inst.instruction |= Rn << 16;
10652
10653 if (inst.operands[3].present)
10654 {
10655 constraint (inst.reloc.exp.X_op != O_constant,
10656 _("expression too complex"));
10657
10658 if (inst.reloc.exp.X_add_number != 0)
10659 {
10660 if (inst.operands[3].shift_kind == SHIFT_ASR)
10661 inst.instruction |= 0x00200000; /* sh bit */
10662 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10663 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10664 }
10665 inst.reloc.type = BFD_RELOC_UNUSED;
10666 }
10667 }
10668
10669 static void
10670 do_t_ssat16 (void)
10671 {
10672 unsigned Rd, Rn;
10673
10674 Rd = inst.operands[0].reg;
10675 Rn = inst.operands[2].reg;
10676
10677 reject_bad_reg (Rd);
10678 reject_bad_reg (Rn);
10679
10680 inst.instruction |= Rd << 8;
10681 inst.instruction |= inst.operands[1].imm - 1;
10682 inst.instruction |= Rn << 16;
10683 }
10684
10685 static void
10686 do_t_strex (void)
10687 {
10688 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10689 || inst.operands[2].postind || inst.operands[2].writeback
10690 || inst.operands[2].immisreg || inst.operands[2].shifted
10691 || inst.operands[2].negative,
10692 BAD_ADDR_MODE);
10693
10694 inst.instruction |= inst.operands[0].reg << 8;
10695 inst.instruction |= inst.operands[1].reg << 12;
10696 inst.instruction |= inst.operands[2].reg << 16;
10697 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10698 }
10699
10700 static void
10701 do_t_strexd (void)
10702 {
10703 if (!inst.operands[2].present)
10704 inst.operands[2].reg = inst.operands[1].reg + 1;
10705
10706 constraint (inst.operands[0].reg == inst.operands[1].reg
10707 || inst.operands[0].reg == inst.operands[2].reg
10708 || inst.operands[0].reg == inst.operands[3].reg
10709 || inst.operands[1].reg == inst.operands[2].reg,
10710 BAD_OVERLAP);
10711
10712 inst.instruction |= inst.operands[0].reg;
10713 inst.instruction |= inst.operands[1].reg << 12;
10714 inst.instruction |= inst.operands[2].reg << 8;
10715 inst.instruction |= inst.operands[3].reg << 16;
10716 }
10717
10718 static void
10719 do_t_sxtah (void)
10720 {
10721 unsigned Rd, Rn, Rm;
10722
10723 Rd = inst.operands[0].reg;
10724 Rn = inst.operands[1].reg;
10725 Rm = inst.operands[2].reg;
10726
10727 reject_bad_reg (Rd);
10728 reject_bad_reg (Rn);
10729 reject_bad_reg (Rm);
10730
10731 inst.instruction |= Rd << 8;
10732 inst.instruction |= Rn << 16;
10733 inst.instruction |= Rm;
10734 inst.instruction |= inst.operands[3].imm << 4;
10735 }
10736
10737 static void
10738 do_t_sxth (void)
10739 {
10740 unsigned Rd, Rm;
10741
10742 Rd = inst.operands[0].reg;
10743 Rm = inst.operands[1].reg;
10744
10745 reject_bad_reg (Rd);
10746 reject_bad_reg (Rm);
10747
10748 if (inst.instruction <= 0xffff && inst.size_req != 4
10749 && Rd <= 7 && Rm <= 7
10750 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10751 {
10752 inst.instruction = THUMB_OP16 (inst.instruction);
10753 inst.instruction |= Rd;
10754 inst.instruction |= Rm << 3;
10755 }
10756 else if (unified_syntax)
10757 {
10758 if (inst.instruction <= 0xffff)
10759 inst.instruction = THUMB_OP32 (inst.instruction);
10760 inst.instruction |= Rd << 8;
10761 inst.instruction |= Rm;
10762 inst.instruction |= inst.operands[2].imm << 4;
10763 }
10764 else
10765 {
10766 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10767 _("Thumb encoding does not support rotation"));
10768 constraint (1, BAD_HIREG);
10769 }
10770 }
10771
10772 static void
10773 do_t_swi (void)
10774 {
10775 inst.reloc.type = BFD_RELOC_ARM_SWI;
10776 }
10777
10778 static void
10779 do_t_tb (void)
10780 {
10781 unsigned Rn, Rm;
10782 int half;
10783
10784 half = (inst.instruction & 0x10) != 0;
10785 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10786 constraint (inst.operands[0].immisreg,
10787 _("instruction requires register index"));
10788
10789 Rn = inst.operands[0].reg;
10790 Rm = inst.operands[0].imm;
10791
10792 constraint (Rn == REG_SP, BAD_SP);
10793 reject_bad_reg (Rm);
10794
10795 constraint (!half && inst.operands[0].shifted,
10796 _("instruction does not allow shifted index"));
10797 inst.instruction |= (Rn << 16) | Rm;
10798 }
10799
10800 static void
10801 do_t_usat (void)
10802 {
10803 unsigned Rd, Rn;
10804
10805 Rd = inst.operands[0].reg;
10806 Rn = inst.operands[2].reg;
10807
10808 reject_bad_reg (Rd);
10809 reject_bad_reg (Rn);
10810
10811 inst.instruction |= Rd << 8;
10812 inst.instruction |= inst.operands[1].imm;
10813 inst.instruction |= Rn << 16;
10814
10815 if (inst.operands[3].present)
10816 {
10817 constraint (inst.reloc.exp.X_op != O_constant,
10818 _("expression too complex"));
10819 if (inst.reloc.exp.X_add_number != 0)
10820 {
10821 if (inst.operands[3].shift_kind == SHIFT_ASR)
10822 inst.instruction |= 0x00200000; /* sh bit */
10823
10824 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10825 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10826 }
10827 inst.reloc.type = BFD_RELOC_UNUSED;
10828 }
10829 }
10830
10831 static void
10832 do_t_usat16 (void)
10833 {
10834 unsigned Rd, Rn;
10835
10836 Rd = inst.operands[0].reg;
10837 Rn = inst.operands[2].reg;
10838
10839 reject_bad_reg (Rd);
10840 reject_bad_reg (Rn);
10841
10842 inst.instruction |= Rd << 8;
10843 inst.instruction |= inst.operands[1].imm;
10844 inst.instruction |= Rn << 16;
10845 }
10846
10847 /* Neon instruction encoder helpers. */
10848
10849 /* Encodings for the different types for various Neon opcodes. */
10850
10851 /* An "invalid" code for the following tables. */
10852 #define N_INV -1u
10853
10854 struct neon_tab_entry
10855 {
10856 unsigned integer;
10857 unsigned float_or_poly;
10858 unsigned scalar_or_imm;
10859 };
10860
10861 /* Map overloaded Neon opcodes to their respective encodings. */
10862 #define NEON_ENC_TAB \
10863 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10864 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10865 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10866 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10867 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10868 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10869 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10870 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10871 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10872 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10873 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10874 /* Register variants of the following two instructions are encoded as
10875 vcge / vcgt with the operands reversed. */ \
10876 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10877 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10878 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10879 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10880 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10881 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10882 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10883 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10884 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10885 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10886 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10887 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10888 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10889 X(vshl, 0x0000400, N_INV, 0x0800510), \
10890 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10891 X(vand, 0x0000110, N_INV, 0x0800030), \
10892 X(vbic, 0x0100110, N_INV, 0x0800030), \
10893 X(veor, 0x1000110, N_INV, N_INV), \
10894 X(vorn, 0x0300110, N_INV, 0x0800010), \
10895 X(vorr, 0x0200110, N_INV, 0x0800010), \
10896 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10897 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10898 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10899 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10900 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10901 X(vst1, 0x0000000, 0x0800000, N_INV), \
10902 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10903 X(vst2, 0x0000100, 0x0800100, N_INV), \
10904 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10905 X(vst3, 0x0000200, 0x0800200, N_INV), \
10906 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10907 X(vst4, 0x0000300, 0x0800300, N_INV), \
10908 X(vmovn, 0x1b20200, N_INV, N_INV), \
10909 X(vtrn, 0x1b20080, N_INV, N_INV), \
10910 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10911 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10912 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10913 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10914 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10915 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10916 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10917 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10918 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10919
10920 enum neon_opc
10921 {
10922 #define X(OPC,I,F,S) N_MNEM_##OPC
10923 NEON_ENC_TAB
10924 #undef X
10925 };
10926
10927 static const struct neon_tab_entry neon_enc_tab[] =
10928 {
10929 #define X(OPC,I,F,S) { (I), (F), (S) }
10930 NEON_ENC_TAB
10931 #undef X
10932 };
10933
10934 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10935 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10936 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10937 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10938 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10939 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10940 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10941 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10942 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10943 #define NEON_ENC_SINGLE(X) \
10944 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10945 #define NEON_ENC_DOUBLE(X) \
10946 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
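
/* Illustration of the X-macro plumbing above, using one table row:
   X(vadd, 0x0000800, 0x0000d00, N_INV) defines the enumerator N_MNEM_vadd
   and the neon_enc_tab entry { 0x0000800, 0x0000d00, N_INV }, so
   NEON_ENC_INTEGER (N_MNEM_vadd) is the integer encoding 0x0000800,
   NEON_ENC_FLOAT (N_MNEM_vadd) is the float encoding 0x0000d00, and the
   scalar/immediate slot is N_INV because VADD has no such form.  The
   & 0x0fffffff in each macro strips condition bits that may have been ORed
   into the top nibble of inst.instruction (see the
   (inst.instruction & 0x0fffffff) comparisons below).  */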
10947
10948 /* Define shapes for instruction operands. The following mnemonic characters
10949 are used in this table:
10950
10951 F - VFP S<n> register
10952 D - Neon D<n> register
10953 Q - Neon Q<n> register
10954 I - Immediate
10955 S - Scalar
10956 R - ARM register
10957 L - D<n> register list
10958
10959 This table is used to generate various data:
10960 - enumerations of the form NS_DDR to be used as arguments to
10961 neon_select_shape.
10962 - a table classifying shapes into single, double, quad, mixed.
10963 - a table used to drive neon_select_shape. */
10964
10965 #define NEON_SHAPE_DEF \
10966 X(3, (D, D, D), DOUBLE), \
10967 X(3, (Q, Q, Q), QUAD), \
10968 X(3, (D, D, I), DOUBLE), \
10969 X(3, (Q, Q, I), QUAD), \
10970 X(3, (D, D, S), DOUBLE), \
10971 X(3, (Q, Q, S), QUAD), \
10972 X(2, (D, D), DOUBLE), \
10973 X(2, (Q, Q), QUAD), \
10974 X(2, (D, S), DOUBLE), \
10975 X(2, (Q, S), QUAD), \
10976 X(2, (D, R), DOUBLE), \
10977 X(2, (Q, R), QUAD), \
10978 X(2, (D, I), DOUBLE), \
10979 X(2, (Q, I), QUAD), \
10980 X(3, (D, L, D), DOUBLE), \
10981 X(2, (D, Q), MIXED), \
10982 X(2, (Q, D), MIXED), \
10983 X(3, (D, Q, I), MIXED), \
10984 X(3, (Q, D, I), MIXED), \
10985 X(3, (Q, D, D), MIXED), \
10986 X(3, (D, Q, Q), MIXED), \
10987 X(3, (Q, Q, D), MIXED), \
10988 X(3, (Q, D, S), MIXED), \
10989 X(3, (D, Q, S), MIXED), \
10990 X(4, (D, D, D, I), DOUBLE), \
10991 X(4, (Q, Q, Q, I), QUAD), \
10992 X(2, (F, F), SINGLE), \
10993 X(3, (F, F, F), SINGLE), \
10994 X(2, (F, I), SINGLE), \
10995 X(2, (F, D), MIXED), \
10996 X(2, (D, F), MIXED), \
10997 X(3, (F, F, I), MIXED), \
10998 X(4, (R, R, F, F), SINGLE), \
10999 X(4, (F, F, R, R), SINGLE), \
11000 X(3, (D, R, R), DOUBLE), \
11001 X(3, (R, R, D), DOUBLE), \
11002 X(2, (S, R), SINGLE), \
11003 X(2, (R, S), SINGLE), \
11004 X(2, (F, R), SINGLE), \
11005 X(2, (R, F), SINGLE)
11006
11007 #define S2(A,B) NS_##A##B
11008 #define S3(A,B,C) NS_##A##B##C
11009 #define S4(A,B,C,D) NS_##A##B##C##D
11010
11011 #define X(N, L, C) S##N L
11012
11013 enum neon_shape
11014 {
11015 NEON_SHAPE_DEF,
11016 NS_NULL
11017 };
11018
11019 #undef X
11020 #undef S2
11021 #undef S3
11022 #undef S4
11023
11024 enum neon_shape_class
11025 {
11026 SC_SINGLE,
11027 SC_DOUBLE,
11028 SC_QUAD,
11029 SC_MIXED
11030 };
11031
11032 #define X(N, L, C) SC_##C
11033
11034 static enum neon_shape_class neon_shape_class[] =
11035 {
11036 NEON_SHAPE_DEF
11037 };
11038
11039 #undef X
11040
11041 enum neon_shape_el
11042 {
11043 SE_F,
11044 SE_D,
11045 SE_Q,
11046 SE_I,
11047 SE_S,
11048 SE_R,
11049 SE_L
11050 };
11051
11052 /* Register widths of the above, in bits (zero for immediates and register lists). */
11053 static unsigned neon_shape_el_size[] =
11054 {
11055 32,
11056 64,
11057 128,
11058 0,
11059 32,
11060 32,
11061 0
11062 };
11063
11064 struct neon_shape_info
11065 {
11066 unsigned els;
11067 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
11068 };
11069
11070 #define S2(A,B) { SE_##A, SE_##B }
11071 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11072 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11073
11074 #define X(N, L, C) { N, S##N L }
11075
11076 static struct neon_shape_info neon_shape_tab[] =
11077 {
11078 NEON_SHAPE_DEF
11079 };
11080
11081 #undef X
11082 #undef S2
11083 #undef S3
11084 #undef S4
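
/* Illustration of how one NEON_SHAPE_DEF row expands through the three
   X-macro instantiations above: X(3, (D, D, D), DOUBLE) produces the
   enumerator NS_DDD, the classification neon_shape_class[NS_DDD] == SC_DOUBLE,
   and the descriptor neon_shape_tab[NS_DDD] == { 3, { SE_D, SE_D, SE_D } }
   used by neon_select_shape below.  */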
11085
11086 /* Bit masks used in type checking given instructions.
11087 'N_EQK' means the type must be the same as (or derived from, via the modifier
11088 bits described below) the key type, which itself is marked with the 'N_KEY'
11089 bit. If the 'N_EQK' bit is set, various other bits can be set as well in
11090 order to modify the meaning of the type constraint. */
11091
11092 enum neon_type_mask
11093 {
11094 N_S8 = 0x0000001,
11095 N_S16 = 0x0000002,
11096 N_S32 = 0x0000004,
11097 N_S64 = 0x0000008,
11098 N_U8 = 0x0000010,
11099 N_U16 = 0x0000020,
11100 N_U32 = 0x0000040,
11101 N_U64 = 0x0000080,
11102 N_I8 = 0x0000100,
11103 N_I16 = 0x0000200,
11104 N_I32 = 0x0000400,
11105 N_I64 = 0x0000800,
11106 N_8 = 0x0001000,
11107 N_16 = 0x0002000,
11108 N_32 = 0x0004000,
11109 N_64 = 0x0008000,
11110 N_P8 = 0x0010000,
11111 N_P16 = 0x0020000,
11112 N_F16 = 0x0040000,
11113 N_F32 = 0x0080000,
11114 N_F64 = 0x0100000,
11115 N_KEY = 0x1000000, /* key element (main type specifier). */
11116 N_EQK = 0x2000000, /* given operand has the same type & size as the key. */
11117 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
11118 N_DBL = 0x0000001, /* if N_EQK, this operand is twice the size. */
11119 N_HLF = 0x0000002, /* if N_EQK, this operand is half the size. */
11120 N_SGN = 0x0000004, /* if N_EQK, this operand is forced to be signed. */
11121 N_UNS = 0x0000008, /* if N_EQK, this operand is forced to be unsigned. */
11122 N_INT = 0x0000010, /* if N_EQK, this operand is forced to be integer. */
11123 N_FLT = 0x0000020, /* if N_EQK, this operand is forced to be float. */
11124 N_SIZ = 0x0000040, /* if N_EQK, this operand is forced to be size-only. */
11125 N_UTYP = 0,
11126 N_MAX_NONSPECIAL = N_F64
11127 };
11128
11129 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11130
11131 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11132 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11133 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11134 #define N_SUF_32 (N_SU_32 | N_F32)
11135 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11136 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11137
11138 /* Pass this as the first type argument to neon_check_type to ignore types
11139 altogether. */
11140 #define N_IGNORE_TYPE (N_KEY | N_EQK)
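
/* The modifier bits N_DBL..N_SIZ reuse the numeric values of the low type
   bits (for example N_DBL == N_S8 == 0x0000001); they only take on their
   modifier meaning when N_EQK is also set.  So an argument such as
   N_EQK | N_DBL | N_UNS reads as "same as the key type, but unsigned and
   twice the size", while N_SU_32 | N_KEY marks a key operand that must be a
   signed or unsigned 8-, 16- or 32-bit type.  */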
11141
11142 /* Select a "shape" for the current instruction (describing register types or
11143 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11144 doesn't fit. For non-polymorphic shapes, checking is usually done as part
11145 of operand parsing, so this function doesn't need to be called.
11146 Shapes should be listed in order of decreasing length. */
11147
11148 static enum neon_shape
11149 neon_select_shape (enum neon_shape shape, ...)
11150 {
11151 va_list ap;
11152 enum neon_shape first_shape = shape;
11153
11154 /* Fix missing optional operands. FIXME: we don't know at this point how
11155 many arguments we should have, so this makes the assumption that we have
11156 > 1. This is true of all current Neon opcodes, I think, but may not be
11157 true in the future. */
11158 if (!inst.operands[1].present)
11159 inst.operands[1] = inst.operands[0];
11160
11161 va_start (ap, shape);
11162
11163 for (; shape != NS_NULL; shape = va_arg (ap, int))
11164 {
11165 unsigned j;
11166 int matches = 1;
11167
11168 for (j = 0; j < neon_shape_tab[shape].els; j++)
11169 {
11170 if (!inst.operands[j].present)
11171 {
11172 matches = 0;
11173 break;
11174 }
11175
11176 switch (neon_shape_tab[shape].el[j])
11177 {
11178 case SE_F:
11179 if (!(inst.operands[j].isreg
11180 && inst.operands[j].isvec
11181 && inst.operands[j].issingle
11182 && !inst.operands[j].isquad))
11183 matches = 0;
11184 break;
11185
11186 case SE_D:
11187 if (!(inst.operands[j].isreg
11188 && inst.operands[j].isvec
11189 && !inst.operands[j].isquad
11190 && !inst.operands[j].issingle))
11191 matches = 0;
11192 break;
11193
11194 case SE_R:
11195 if (!(inst.operands[j].isreg
11196 && !inst.operands[j].isvec))
11197 matches = 0;
11198 break;
11199
11200 case SE_Q:
11201 if (!(inst.operands[j].isreg
11202 && inst.operands[j].isvec
11203 && inst.operands[j].isquad
11204 && !inst.operands[j].issingle))
11205 matches = 0;
11206 break;
11207
11208 case SE_I:
11209 if (!(!inst.operands[j].isreg
11210 && !inst.operands[j].isscalar))
11211 matches = 0;
11212 break;
11213
11214 case SE_S:
11215 if (!(!inst.operands[j].isreg
11216 && inst.operands[j].isscalar))
11217 matches = 0;
11218 break;
11219
11220 case SE_L:
11221 break;
11222 }
11223 }
11224 if (matches)
11225 break;
11226 }
11227
11228 va_end (ap);
11229
11230 if (shape == NS_NULL && first_shape != NS_NULL)
11231 first_error (_("invalid instruction shape"));
11232
11233 return shape;
11234 }
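
/* Example of the matching above: for a three-D-register operand list (say
   d0, d1, d2), a call such as neon_select_shape (NS_DDD, NS_QQQ, NS_NULL)
   (used by several encoders below) returns NS_DDD; with q0, q1, q2 it
   returns NS_QQQ; and if the operands fit none of the listed shapes it
   reports "invalid instruction shape" and returns NS_NULL.  */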
11235
11236 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11237 means the Q bit should be set). */
11238
11239 static int
11240 neon_quad (enum neon_shape shape)
11241 {
11242 return neon_shape_class[shape] == SC_QUAD;
11243 }
11244
11245 static void
11246 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11247 unsigned *g_size)
11248 {
11249 /* Allow modification to be made to types which are constrained to be
11250 based on the key element, based on bits set alongside N_EQK. */
11251 if ((typebits & N_EQK) != 0)
11252 {
11253 if ((typebits & N_HLF) != 0)
11254 *g_size /= 2;
11255 else if ((typebits & N_DBL) != 0)
11256 *g_size *= 2;
11257 if ((typebits & N_SGN) != 0)
11258 *g_type = NT_signed;
11259 else if ((typebits & N_UNS) != 0)
11260 *g_type = NT_unsigned;
11261 else if ((typebits & N_INT) != 0)
11262 *g_type = NT_integer;
11263 else if ((typebits & N_FLT) != 0)
11264 *g_type = NT_float;
11265 else if ((typebits & N_SIZ) != 0)
11266 *g_type = NT_untyped;
11267 }
11268 }
11269
11270 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11271 operand type, i.e. the single type specified in a Neon instruction when it
11272 is the only one given. */
11273
11274 static struct neon_type_el
11275 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11276 {
11277 struct neon_type_el dest = *key;
11278
11279 assert ((thisarg & N_EQK) != 0);
11280
11281 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11282
11283 return dest;
11284 }
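
/* Worked example: promoting a key of { NT_signed, 16 } with
   thisarg == (N_EQK | N_DBL | N_UNS) yields { NT_unsigned, 32 }: the size is
   doubled by N_DBL and the type forced to unsigned by N_UNS.  */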
11285
11286 /* Convert Neon type and size into compact bitmask representation. */
11287
11288 static enum neon_type_mask
11289 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11290 {
11291 switch (type)
11292 {
11293 case NT_untyped:
11294 switch (size)
11295 {
11296 case 8: return N_8;
11297 case 16: return N_16;
11298 case 32: return N_32;
11299 case 64: return N_64;
11300 default: ;
11301 }
11302 break;
11303
11304 case NT_integer:
11305 switch (size)
11306 {
11307 case 8: return N_I8;
11308 case 16: return N_I16;
11309 case 32: return N_I32;
11310 case 64: return N_I64;
11311 default: ;
11312 }
11313 break;
11314
11315 case NT_float:
11316 switch (size)
11317 {
11318 case 16: return N_F16;
11319 case 32: return N_F32;
11320 case 64: return N_F64;
11321 default: ;
11322 }
11323 break;
11324
11325 case NT_poly:
11326 switch (size)
11327 {
11328 case 8: return N_P8;
11329 case 16: return N_P16;
11330 default: ;
11331 }
11332 break;
11333
11334 case NT_signed:
11335 switch (size)
11336 {
11337 case 8: return N_S8;
11338 case 16: return N_S16;
11339 case 32: return N_S32;
11340 case 64: return N_S64;
11341 default: ;
11342 }
11343 break;
11344
11345 case NT_unsigned:
11346 switch (size)
11347 {
11348 case 8: return N_U8;
11349 case 16: return N_U16;
11350 case 32: return N_U32;
11351 case 64: return N_U64;
11352 default: ;
11353 }
11354 break;
11355
11356 default: ;
11357 }
11358
11359 return N_UTYP;
11360 }
11361
11362 /* Convert compact Neon bitmask type representation to a type and size. Only
11363 handles the case where a single bit is set in the mask. */
11364
11365 static int
11366 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11367 enum neon_type_mask mask)
11368 {
11369 if ((mask & N_EQK) != 0)
11370 return FAIL;
11371
11372 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11373 *size = 8;
11374 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11375 *size = 16;
11376 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11377 *size = 32;
11378 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11379 *size = 64;
11380 else
11381 return FAIL;
11382
11383 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11384 *type = NT_signed;
11385 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11386 *type = NT_unsigned;
11387 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11388 *type = NT_integer;
11389 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11390 *type = NT_untyped;
11391 else if ((mask & (N_P8 | N_P16)) != 0)
11392 *type = NT_poly;
11393 else if ((mask & (N_F32 | N_F64)) != 0)
11394 *type = NT_float;
11395 else
11396 return FAIL;
11397
11398 return SUCCESS;
11399 }
11400
11401 /* Modify a bitmask of allowed types. This is only needed for type
11402 relaxation. */
11403
11404 static unsigned
11405 modify_types_allowed (unsigned allowed, unsigned mods)
11406 {
11407 unsigned size;
11408 enum neon_el_type type;
11409 unsigned destmask;
11410 int i;
11411
11412 destmask = 0;
11413
11414 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11415 {
11416 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
11417 {
11418 neon_modify_type_size (mods, &type, &size);
11419 destmask |= type_chk_of_el_type (type, size);
11420 }
11421 }
11422
11423 return destmask;
11424 }
11425
11426 /* Check type and return type classification.
11427 The manual states (paraphrase): If one datatype is given, it indicates the
11428 type given in:
11429 - the second operand, if there is one
11430 - the operand, if there is no second operand
11431 - the result, if there are no operands.
11432 This isn't quite good enough though, so we use a concept of a "key" datatype
11433 which is set on a per-instruction basis, which is the one which matters when
11434 only one data type is written.
11435 Note: this function has side-effects (e.g. filling in missing operands). All
11436 Neon instructions should call it before performing bit encoding. */
11437
11438 static struct neon_type_el
11439 neon_check_type (unsigned els, enum neon_shape ns, ...)
11440 {
11441 va_list ap;
11442 unsigned i, pass, key_el = 0;
11443 unsigned types[NEON_MAX_TYPE_ELS];
11444 enum neon_el_type k_type = NT_invtype;
11445 unsigned k_size = -1u;
11446 struct neon_type_el badtype = {NT_invtype, -1};
11447 unsigned key_allowed = 0;
11448
11449 /* The optional register in a Neon instruction, if there is one, is always
11450 operand 1. Fill in the missing operand here, if it was omitted. */
11451 if (els > 1 && !inst.operands[1].present)
11452 inst.operands[1] = inst.operands[0];
11453
11454 /* Suck up all the varargs. */
11455 va_start (ap, ns);
11456 for (i = 0; i < els; i++)
11457 {
11458 unsigned thisarg = va_arg (ap, unsigned);
11459 if (thisarg == N_IGNORE_TYPE)
11460 {
11461 va_end (ap);
11462 return badtype;
11463 }
11464 types[i] = thisarg;
11465 if ((thisarg & N_KEY) != 0)
11466 key_el = i;
11467 }
11468 va_end (ap);
11469
11470 if (inst.vectype.elems > 0)
11471 for (i = 0; i < els; i++)
11472 if (inst.operands[i].vectype.type != NT_invtype)
11473 {
11474 first_error (_("types specified in both the mnemonic and operands"));
11475 return badtype;
11476 }
11477
11478 /* Duplicate inst.vectype elements here as necessary.
11479 FIXME: No idea if this is exactly the same as the ARM assembler,
11480 particularly when an insn takes one register and one non-register
11481 operand. */
11482 if (inst.vectype.elems == 1 && els > 1)
11483 {
11484 unsigned j;
11485 inst.vectype.elems = els;
11486 inst.vectype.el[key_el] = inst.vectype.el[0];
11487 for (j = 0; j < els; j++)
11488 if (j != key_el)
11489 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11490 types[j]);
11491 }
11492 else if (inst.vectype.elems == 0 && els > 0)
11493 {
11494 unsigned j;
11495 /* No types were given after the mnemonic, so look for types specified
11496 after each operand. We allow some flexibility here; as long as the
11497 "key" operand has a type, we can infer the others. */
11498 for (j = 0; j < els; j++)
11499 if (inst.operands[j].vectype.type != NT_invtype)
11500 inst.vectype.el[j] = inst.operands[j].vectype;
11501
11502 if (inst.operands[key_el].vectype.type != NT_invtype)
11503 {
11504 for (j = 0; j < els; j++)
11505 if (inst.operands[j].vectype.type == NT_invtype)
11506 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11507 types[j]);
11508 }
11509 else
11510 {
11511 first_error (_("operand types can't be inferred"));
11512 return badtype;
11513 }
11514 }
11515 else if (inst.vectype.elems != els)
11516 {
11517 first_error (_("type specifier has the wrong number of parts"));
11518 return badtype;
11519 }
11520
11521 for (pass = 0; pass < 2; pass++)
11522 {
11523 for (i = 0; i < els; i++)
11524 {
11525 unsigned thisarg = types[i];
11526 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
11527 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
11528 enum neon_el_type g_type = inst.vectype.el[i].type;
11529 unsigned g_size = inst.vectype.el[i].size;
11530
11531 /* Decay more-specific signed & unsigned types to sign-insensitive
11532 integer types if sign-specific variants are unavailable. */
11533 if ((g_type == NT_signed || g_type == NT_unsigned)
11534 && (types_allowed & N_SU_ALL) == 0)
11535 g_type = NT_integer;
11536
11537 /* If only untyped args are allowed, decay any more specific types to
11538 them. Some instructions only care about signs for some element
11539 sizes, so handle that properly. */
11540 if ((g_size == 8 && (types_allowed & N_8) != 0)
11541 || (g_size == 16 && (types_allowed & N_16) != 0)
11542 || (g_size == 32 && (types_allowed & N_32) != 0)
11543 || (g_size == 64 && (types_allowed & N_64) != 0))
11544 g_type = NT_untyped;
11545
11546 if (pass == 0)
11547 {
11548 if ((thisarg & N_KEY) != 0)
11549 {
11550 k_type = g_type;
11551 k_size = g_size;
11552 key_allowed = thisarg & ~N_KEY;
11553 }
11554 }
11555 else
11556 {
11557 if ((thisarg & N_VFP) != 0)
11558 {
11559 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11560 unsigned regwidth = neon_shape_el_size[regshape], match;
11561
11562 /* In VFP mode, operands must match register widths. If we
11563 have a key operand, use its width, else use the width of
11564 the current operand. */
11565 if (k_size != -1u)
11566 match = k_size;
11567 else
11568 match = g_size;
11569
11570 if (regwidth != match)
11571 {
11572 first_error (_("operand size must match register width"));
11573 return badtype;
11574 }
11575 }
11576
11577 if ((thisarg & N_EQK) == 0)
11578 {
11579 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11580
11581 if ((given_type & types_allowed) == 0)
11582 {
11583 first_error (_("bad type in Neon instruction"));
11584 return badtype;
11585 }
11586 }
11587 else
11588 {
11589 enum neon_el_type mod_k_type = k_type;
11590 unsigned mod_k_size = k_size;
11591 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11592 if (g_type != mod_k_type || g_size != mod_k_size)
11593 {
11594 first_error (_("inconsistent types in Neon instruction"));
11595 return badtype;
11596 }
11597 }
11598 }
11599 }
11600 }
11601
11602 return inst.vectype.el[key_el];
11603 }
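
/* Illustration of the key mechanism above (hypothetical argument list, not
   necessarily one used verbatim elsewhere in this file): for an instruction
   written "xxx.s16 d0, d1" checked with
   neon_check_type (2, NS_DD, N_EQK | N_DBL, N_S16 | N_S32 | N_KEY),
   the single .s16 suffix attaches to the key (second) operand and the first
   operand is promoted to .s32 via neon_type_promote; writing
   "xxx.s16.s16 d0, d1" instead trips the "inconsistent types in Neon
   instruction" check.  The returned type element is { NT_signed, 16 },
   taken from the key operand.  */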
11604
11605 /* Neon-style VFP instruction forwarding. */
11606
11607 /* Thumb VFP instructions have 0xE in the condition field. */
11608
11609 static void
11610 do_vfp_cond_or_thumb (void)
11611 {
11612 if (thumb_mode)
11613 inst.instruction |= 0xe0000000;
11614 else
11615 inst.instruction |= inst.cond << 28;
11616 }
11617
11618 /* Look up and encode a simple mnemonic, for use as a helper function for the
11619 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11620 etc. It is assumed that operand parsing has already been done, and that the
11621 operands are in the form expected by the given opcode (this isn't necessarily
11622 the same as the form in which they were parsed, hence some massaging must
11623 take place before this function is called).
11624 Checks current arch version against that in the looked-up opcode. */
11625
11626 static void
11627 do_vfp_nsyn_opcode (const char *opname)
11628 {
11629 const struct asm_opcode *opcode;
11630
11631 opcode = hash_find (arm_ops_hsh, opname);
11632
11633 if (!opcode)
11634 abort ();
11635
11636 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11637 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11638 _(BAD_FPU));
11639
11640 if (thumb_mode)
11641 {
11642 inst.instruction = opcode->tvalue;
11643 opcode->tencode ();
11644 }
11645 else
11646 {
11647 inst.instruction = (inst.cond << 28) | opcode->avalue;
11648 opcode->aencode ();
11649 }
11650 }
11651
11652 static void
11653 do_vfp_nsyn_add_sub (enum neon_shape rs)
11654 {
11655 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11656
11657 if (rs == NS_FFF)
11658 {
11659 if (is_add)
11660 do_vfp_nsyn_opcode ("fadds");
11661 else
11662 do_vfp_nsyn_opcode ("fsubs");
11663 }
11664 else
11665 {
11666 if (is_add)
11667 do_vfp_nsyn_opcode ("faddd");
11668 else
11669 do_vfp_nsyn_opcode ("fsubd");
11670 }
11671 }
11672
11673 /* Check operand types to see if this is a VFP instruction, and if so call
11674 PFN (). */
11675
11676 static int
11677 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11678 {
11679 enum neon_shape rs;
11680 struct neon_type_el et;
11681
11682 switch (args)
11683 {
11684 case 2:
11685 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11686 et = neon_check_type (2, rs,
11687 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11688 break;
11689
11690 case 3:
11691 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11692 et = neon_check_type (3, rs,
11693 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11694 break;
11695
11696 default:
11697 abort ();
11698 }
11699
11700 if (et.type != NT_invtype)
11701 {
11702 pfn (rs);
11703 return SUCCESS;
11704 }
11705 else
11706 inst.error = NULL;
11707
11708 return FAIL;
11709 }
11710
11711 static void
11712 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11713 {
11714 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11715
11716 if (rs == NS_FFF)
11717 {
11718 if (is_mla)
11719 do_vfp_nsyn_opcode ("fmacs");
11720 else
11721 do_vfp_nsyn_opcode ("fmscs");
11722 }
11723 else
11724 {
11725 if (is_mla)
11726 do_vfp_nsyn_opcode ("fmacd");
11727 else
11728 do_vfp_nsyn_opcode ("fmscd");
11729 }
11730 }
11731
11732 static void
11733 do_vfp_nsyn_mul (enum neon_shape rs)
11734 {
11735 if (rs == NS_FFF)
11736 do_vfp_nsyn_opcode ("fmuls");
11737 else
11738 do_vfp_nsyn_opcode ("fmuld");
11739 }
11740
11741 static void
11742 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11743 {
11744 int is_neg = (inst.instruction & 0x80) != 0;
11745 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11746
11747 if (rs == NS_FF)
11748 {
11749 if (is_neg)
11750 do_vfp_nsyn_opcode ("fnegs");
11751 else
11752 do_vfp_nsyn_opcode ("fabss");
11753 }
11754 else
11755 {
11756 if (is_neg)
11757 do_vfp_nsyn_opcode ("fnegd");
11758 else
11759 do_vfp_nsyn_opcode ("fabsd");
11760 }
11761 }
11762
11763 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11764 insns belong to Neon, and are handled elsewhere. */
11765
11766 static void
11767 do_vfp_nsyn_ldm_stm (int is_dbmode)
11768 {
11769 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11770 if (is_ldm)
11771 {
11772 if (is_dbmode)
11773 do_vfp_nsyn_opcode ("fldmdbs");
11774 else
11775 do_vfp_nsyn_opcode ("fldmias");
11776 }
11777 else
11778 {
11779 if (is_dbmode)
11780 do_vfp_nsyn_opcode ("fstmdbs");
11781 else
11782 do_vfp_nsyn_opcode ("fstmias");
11783 }
11784 }
11785
11786 static void
11787 do_vfp_nsyn_sqrt (void)
11788 {
11789 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11790 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11791
11792 if (rs == NS_FF)
11793 do_vfp_nsyn_opcode ("fsqrts");
11794 else
11795 do_vfp_nsyn_opcode ("fsqrtd");
11796 }
11797
11798 static void
11799 do_vfp_nsyn_div (void)
11800 {
11801 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11802 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11803 N_F32 | N_F64 | N_KEY | N_VFP);
11804
11805 if (rs == NS_FFF)
11806 do_vfp_nsyn_opcode ("fdivs");
11807 else
11808 do_vfp_nsyn_opcode ("fdivd");
11809 }
11810
11811 static void
11812 do_vfp_nsyn_nmul (void)
11813 {
11814 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11815 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11816 N_F32 | N_F64 | N_KEY | N_VFP);
11817
11818 if (rs == NS_FFF)
11819 {
11820 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11821 do_vfp_sp_dyadic ();
11822 }
11823 else
11824 {
11825 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11826 do_vfp_dp_rd_rn_rm ();
11827 }
11828 do_vfp_cond_or_thumb ();
11829 }
11830
11831 static void
11832 do_vfp_nsyn_cmp (void)
11833 {
11834 if (inst.operands[1].isreg)
11835 {
11836 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11837 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11838
11839 if (rs == NS_FF)
11840 {
11841 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11842 do_vfp_sp_monadic ();
11843 }
11844 else
11845 {
11846 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11847 do_vfp_dp_rd_rm ();
11848 }
11849 }
11850 else
11851 {
11852 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11853 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11854
11855 switch (inst.instruction & 0x0fffffff)
11856 {
11857 case N_MNEM_vcmp:
11858 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11859 break;
11860 case N_MNEM_vcmpe:
11861 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11862 break;
11863 default:
11864 abort ();
11865 }
11866
11867 if (rs == NS_FI)
11868 {
11869 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11870 do_vfp_sp_compare_z ();
11871 }
11872 else
11873 {
11874 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11875 do_vfp_dp_rd ();
11876 }
11877 }
11878 do_vfp_cond_or_thumb ();
11879 }
11880
11881 static void
11882 nsyn_insert_sp (void)
11883 {
11884 inst.operands[1] = inst.operands[0];
11885 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11886 inst.operands[0].reg = REG_SP;
11887 inst.operands[0].isreg = 1;
11888 inst.operands[0].writeback = 1;
11889 inst.operands[0].present = 1;
11890 }
11891
11892 static void
11893 do_vfp_nsyn_push (void)
11894 {
11895 nsyn_insert_sp ();
11896 if (inst.operands[1].issingle)
11897 do_vfp_nsyn_opcode ("fstmdbs");
11898 else
11899 do_vfp_nsyn_opcode ("fstmdbd");
11900 }
11901
11902 static void
11903 do_vfp_nsyn_pop (void)
11904 {
11905 nsyn_insert_sp ();
11906 if (inst.operands[1].issingle)
11907 do_vfp_nsyn_opcode ("fldmias");
11908 else
11909 do_vfp_nsyn_opcode ("fldmiad");
11910 }
11911
11912 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11913 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11914
11915 static unsigned
11916 neon_dp_fixup (unsigned i)
11917 {
11918 if (thumb_mode)
11919 {
11920 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11921 if (i & (1 << 24))
11922 i |= 1 << 28;
11923
11924 i &= ~(1 << 24);
11925
11926 i |= 0xef000000;
11927 }
11928 else
11929 i |= 0xf2000000;
11930
11931 return i;
11932 }
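
/* Worked example: starting from the integer VSUB encoding 0x1000800 in
   NEON_ENC_TAB (which has the U bit, bit 24, set), ARM mode simply ORs in
   0xf2000000, giving 0xf3000800, while Thumb mode moves the U bit to bit 28
   and ORs in 0xef000000, giving 0xff000800.  Register and size fields are
   ORed in by the callers (e.g. neon_three_same) before this fixup runs.  */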
11933
11934 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11935 (0, 1, 2, 3). */
11936
11937 static unsigned
11938 neon_logbits (unsigned x)
11939 {
11940 return ffs (x) - 4;
11941 }
11942
11943 #define LOW4(R) ((R) & 0xf)
11944 #define HI1(R) (((R) >> 4) & 1)
11945
11946 /* Encode insns with bit pattern:
11947
11948 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11949 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11950
11951 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11952 different meaning for some instruction. */
11953
11954 static void
11955 neon_three_same (int isquad, int ubit, int size)
11956 {
11957 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11958 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11959 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11960 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11961 inst.instruction |= LOW4 (inst.operands[2].reg);
11962 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11963 inst.instruction |= (isquad != 0) << 6;
11964 inst.instruction |= (ubit != 0) << 24;
11965 if (size != -1)
11966 inst.instruction |= neon_logbits (size) << 20;
11967
11968 inst.instruction = neon_dp_fixup (inst.instruction);
11969 }
11970
11971 /* Encode instructions of the form:
11972
11973 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11974 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11975
11976 Don't write size if SIZE == -1. */
11977
11978 static void
11979 neon_two_same (int qbit, int ubit, int size)
11980 {
11981 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11982 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11983 inst.instruction |= LOW4 (inst.operands[1].reg);
11984 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11985 inst.instruction |= (qbit != 0) << 6;
11986 inst.instruction |= (ubit != 0) << 24;
11987
11988 if (size != -1)
11989 inst.instruction |= neon_logbits (size) << 18;
11990
11991 inst.instruction = neon_dp_fixup (inst.instruction);
11992 }
11993
11994 /* Neon instruction encoders, in approximate order of appearance. */
11995
11996 static void
11997 do_neon_dyadic_i_su (void)
11998 {
11999 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12000 struct neon_type_el et = neon_check_type (3, rs,
12001 N_EQK, N_EQK, N_SU_32 | N_KEY);
12002 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12003 }
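
/* Illustrative consequence of the N_SU_32 key above: these encoders accept
   only signed or unsigned 8-, 16- or 32-bit element types.  A ".u16" suffix,
   for instance, selects et = { NT_unsigned, 16 }, so neon_three_same sets
   the U bit and a size field of 1 (neon_logbits (16)); an ".i16" or ".f32"
   suffix is rejected with "bad type in Neon instruction".  */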
12004
12005 static void
12006 do_neon_dyadic_i64_su (void)
12007 {
12008 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12009 struct neon_type_el et = neon_check_type (3, rs,
12010 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12011 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12012 }
12013
12014 static void
12015 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
12016 unsigned immbits)
12017 {
12018 unsigned size = et.size >> 3;
12019 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12020 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12021 inst.instruction |= LOW4 (inst.operands[1].reg);
12022 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12023 inst.instruction |= (isquad != 0) << 6;
12024 inst.instruction |= immbits << 16;
12025 inst.instruction |= (size >> 3) << 7;
12026 inst.instruction |= (size & 0x7) << 19;
12027 if (write_ubit)
12028 inst.instruction |= (uval != 0) << 24;
12029
12030 inst.instruction = neon_dp_fixup (inst.instruction);
12031 }
12032
12033 static void
12034 do_neon_shl_imm (void)
12035 {
12036 if (!inst.operands[2].isreg)
12037 {
12038 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12039 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
12040 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12041 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
12042 }
12043 else
12044 {
12045 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12046 struct neon_type_el et = neon_check_type (3, rs,
12047 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12048 unsigned int tmp;
12049
12050 /* VSHL/VQSHL 3-register variants have syntax such as:
12051 vshl.xx Dd, Dm, Dn
12052 whereas other 3-register operations encoded by neon_three_same have
12053 syntax like:
12054 vadd.xx Dd, Dn, Dm
12055 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12056 here. */
12057 tmp = inst.operands[2].reg;
12058 inst.operands[2].reg = inst.operands[1].reg;
12059 inst.operands[1].reg = tmp;
12060 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12061 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12062 }
12063 }
12064
12065 static void
12066 do_neon_qshl_imm (void)
12067 {
12068 if (!inst.operands[2].isreg)
12069 {
12070 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12071 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12072
12073 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12074 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12075 inst.operands[2].imm);
12076 }
12077 else
12078 {
12079 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12080 struct neon_type_el et = neon_check_type (3, rs,
12081 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12082 unsigned int tmp;
12083
12084 /* See note in do_neon_shl_imm. */
12085 tmp = inst.operands[2].reg;
12086 inst.operands[2].reg = inst.operands[1].reg;
12087 inst.operands[1].reg = tmp;
12088 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12089 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12090 }
12091 }
12092
12093 static void
12094 do_neon_rshl (void)
12095 {
12096 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12097 struct neon_type_el et = neon_check_type (3, rs,
12098 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12099 unsigned int tmp;
12100
12101 tmp = inst.operands[2].reg;
12102 inst.operands[2].reg = inst.operands[1].reg;
12103 inst.operands[1].reg = tmp;
12104 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12105 }
12106
12107 static int
12108 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
12109 {
12110 /* Handle .I8 pseudo-instructions. */
12111 if (size == 8)
12112 {
12113 /* Unfortunately, this will make everything apart from zero out-of-range.
12114 FIXME: is this the intended semantics? There doesn't seem much point in
12115 accepting .I8 if so. */
12116 immediate |= immediate << 8;
12117 size = 16;
12118 }
12119
12120 if (size >= 32)
12121 {
12122 if (immediate == (immediate & 0x000000ff))
12123 {
12124 *immbits = immediate;
12125 return 0x1;
12126 }
12127 else if (immediate == (immediate & 0x0000ff00))
12128 {
12129 *immbits = immediate >> 8;
12130 return 0x3;
12131 }
12132 else if (immediate == (immediate & 0x00ff0000))
12133 {
12134 *immbits = immediate >> 16;
12135 return 0x5;
12136 }
12137 else if (immediate == (immediate & 0xff000000))
12138 {
12139 *immbits = immediate >> 24;
12140 return 0x7;
12141 }
12142 if ((immediate & 0xffff) != (immediate >> 16))
12143 goto bad_immediate;
12144 immediate &= 0xffff;
12145 }
12146
12147 if (immediate == (immediate & 0x000000ff))
12148 {
12149 *immbits = immediate;
12150 return 0x9;
12151 }
12152 else if (immediate == (immediate & 0x0000ff00))
12153 {
12154 *immbits = immediate >> 8;
12155 return 0xb;
12156 }
12157
12158 bad_immediate:
12159 first_error (_("immediate value out of range"));
12160 return FAIL;
12161 }
12162
12163 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12164 A, B, C, D. */
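/* For example, 0x00ff00ff and 0xffffffff have this form, while 0x00f000ff
   does not (its second byte is neither 0x00 nor 0xff).  */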
12165
12166 static int
12167 neon_bits_same_in_bytes (unsigned imm)
12168 {
12169 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
12170 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
12171 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
12172 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
12173 }
12174
12175 /* For immediate of above form, return 0bABCD. */
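/* For example, 0xff00ff00 squashes to 0b1010.  */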
12176
12177 static unsigned
12178 neon_squash_bits (unsigned imm)
12179 {
12180 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
12181 | ((imm & 0x01000000) >> 21);
12182 }
12183
12184 /* Compress quarter-float representation to 0b...000 abcdefgh. */
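/* For example, 1.0f (single-precision bit pattern 0x3f800000) compresses
   to 0x70.  */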
12185
12186 static unsigned
12187 neon_qfloat_bits (unsigned imm)
12188 {
12189 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
12190 }
12191
12192 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12193 the instruction. *OP is passed as the initial value of the op field, and
12194    may be set to a different value depending on the constant (e.g.
12195 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12196 MVN). If the immediate looks like a repeated pattern then also
12197 try smaller element sizes. */
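/* For example, a VMOV.I32 immediate of 0x00ff00ff is not representable at
   32 bits but repeats as a 16-bit pattern, so it is encoded with the smaller
   element size: cmode 0x8 with *IMMBITS == 0xff.  */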
12198
12199 static int
12200 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
12201 unsigned *immbits, int *op, int size,
12202 enum neon_el_type type)
12203 {
12204 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12205 float. */
12206 if (type == NT_float && !float_p)
12207 return FAIL;
12208
12209 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
12210 {
12211 if (size != 32 || *op == 1)
12212 return FAIL;
12213 *immbits = neon_qfloat_bits (immlo);
12214 return 0xf;
12215 }
12216
12217 if (size == 64)
12218 {
12219 if (neon_bits_same_in_bytes (immhi)
12220 && neon_bits_same_in_bytes (immlo))
12221 {
12222 if (*op == 1)
12223 return FAIL;
12224 *immbits = (neon_squash_bits (immhi) << 4)
12225 | neon_squash_bits (immlo);
12226 *op = 1;
12227 return 0xe;
12228 }
12229
12230 if (immhi != immlo)
12231 return FAIL;
12232 }
12233
12234 if (size >= 32)
12235 {
12236 if (immlo == (immlo & 0x000000ff))
12237 {
12238 *immbits = immlo;
12239 return 0x0;
12240 }
12241 else if (immlo == (immlo & 0x0000ff00))
12242 {
12243 *immbits = immlo >> 8;
12244 return 0x2;
12245 }
12246 else if (immlo == (immlo & 0x00ff0000))
12247 {
12248 *immbits = immlo >> 16;
12249 return 0x4;
12250 }
12251 else if (immlo == (immlo & 0xff000000))
12252 {
12253 *immbits = immlo >> 24;
12254 return 0x6;
12255 }
12256 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
12257 {
12258 *immbits = (immlo >> 8) & 0xff;
12259 return 0xc;
12260 }
12261 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
12262 {
12263 *immbits = (immlo >> 16) & 0xff;
12264 return 0xd;
12265 }
12266
12267 if ((immlo & 0xffff) != (immlo >> 16))
12268 return FAIL;
12269 immlo &= 0xffff;
12270 }
12271
12272 if (size >= 16)
12273 {
12274 if (immlo == (immlo & 0x000000ff))
12275 {
12276 *immbits = immlo;
12277 return 0x8;
12278 }
12279 else if (immlo == (immlo & 0x0000ff00))
12280 {
12281 *immbits = immlo >> 8;
12282 return 0xa;
12283 }
12284
12285 if ((immlo & 0xff) != (immlo >> 8))
12286 return FAIL;
12287 immlo &= 0xff;
12288 }
12289
12290 if (immlo == (immlo & 0x000000ff))
12291 {
12292 /* Don't allow MVN with 8-bit immediate. */
12293 if (*op == 1)
12294 return FAIL;
12295 *immbits = immlo;
12296 return 0xe;
12297 }
12298
12299 return FAIL;
12300 }
12301
12302 /* Write immediate bits [7:0] to the following locations:
12303
12304 |28/24|23 19|18 16|15 4|3 0|
12305 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12306
12307 This function is used by VMOV/VMVN/VORR/VBIC. */
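/* For example, IMMBITS == 0xd2 (0b11010010) places 0b0010 in bits [3:0],
   0b101 in bits [18:16] and 1 in the "a" position.  */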
12308
12309 static void
12310 neon_write_immbits (unsigned immbits)
12311 {
12312 inst.instruction |= immbits & 0xf;
12313 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
12314 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
12315 }
12316
12317 /* Invert low-order SIZE bits of XHI:XLO. */
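/* For example, with SIZE == 16 and *XLO == 0x00ff, *XLO becomes 0xff00 and
   *XHI is left alone; with SIZE == 64 both words are inverted.  */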
12318
12319 static void
12320 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
12321 {
12322 unsigned immlo = xlo ? *xlo : 0;
12323 unsigned immhi = xhi ? *xhi : 0;
12324
12325 switch (size)
12326 {
12327 case 8:
12328 immlo = (~immlo) & 0xff;
12329 break;
12330
12331 case 16:
12332 immlo = (~immlo) & 0xffff;
12333 break;
12334
12335 case 64:
12336 immhi = (~immhi) & 0xffffffff;
12337 /* fall through. */
12338
12339 case 32:
12340 immlo = (~immlo) & 0xffffffff;
12341 break;
12342
12343 default:
12344 abort ();
12345 }
12346
12347 if (xlo)
12348 *xlo = immlo;
12349
12350 if (xhi)
12351 *xhi = immhi;
12352 }
12353
12354 static void
12355 do_neon_logic (void)
12356 {
12357 if (inst.operands[2].present && inst.operands[2].isreg)
12358 {
12359 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12360 neon_check_type (3, rs, N_IGNORE_TYPE);
12361 /* U bit and size field were set as part of the bitmask. */
12362 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12363 neon_three_same (neon_quad (rs), 0, -1);
12364 }
12365 else
12366 {
12367 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12368 struct neon_type_el et = neon_check_type (2, rs,
12369 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12370 enum neon_opc opcode = inst.instruction & 0x0fffffff;
12371 unsigned immbits;
12372 int cmode;
12373
12374 if (et.type == NT_invtype)
12375 return;
12376
12377 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12378
12379 immbits = inst.operands[1].imm;
12380 if (et.size == 64)
12381 {
12382 /* .i64 is a pseudo-op, so the immediate must be a repeating
12383 pattern. */
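	  /* For example, #0xff00ff00ff00ff00 repeats the same 32-bit value
	     in both halves and can be encoded, whereas #0x00000000ffffffff
	     cannot.  */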
12384 if (immbits != (inst.operands[1].regisimm ?
12385 inst.operands[1].reg : 0))
12386 {
12387 /* Set immbits to an invalid constant. */
12388 immbits = 0xdeadbeef;
12389 }
12390 }
12391
12392 switch (opcode)
12393 {
12394 case N_MNEM_vbic:
12395 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12396 break;
12397
12398 case N_MNEM_vorr:
12399 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12400 break;
12401
12402 case N_MNEM_vand:
12403 /* Pseudo-instruction for VBIC. */
12404 neon_invert_size (&immbits, 0, et.size);
12405 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12406 break;
12407
12408 case N_MNEM_vorn:
12409 /* Pseudo-instruction for VORR. */
12410 neon_invert_size (&immbits, 0, et.size);
12411 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12412 break;
12413
12414 default:
12415 abort ();
12416 }
12417
12418 if (cmode == FAIL)
12419 return;
12420
12421 inst.instruction |= neon_quad (rs) << 6;
12422 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12423 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12424 inst.instruction |= cmode << 8;
12425 neon_write_immbits (immbits);
12426
12427 inst.instruction = neon_dp_fixup (inst.instruction);
12428 }
12429 }
12430
12431 static void
12432 do_neon_bitfield (void)
12433 {
12434 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12435 neon_check_type (3, rs, N_IGNORE_TYPE);
12436 neon_three_same (neon_quad (rs), 0, -1);
12437 }
12438
12439 static void
12440 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
12441 unsigned destbits)
12442 {
12443 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12444 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
12445 types | N_KEY);
12446 if (et.type == NT_float)
12447 {
12448 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
12449 neon_three_same (neon_quad (rs), 0, -1);
12450 }
12451 else
12452 {
12453 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12454 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
12455 }
12456 }
12457
12458 static void
12459 do_neon_dyadic_if_su (void)
12460 {
12461 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12462 }
12463
12464 static void
12465 do_neon_dyadic_if_su_d (void)
12466 {
12467   /* This version only allows D registers, but that constraint is enforced during
12468      operand parsing so we don't need to do anything extra here.  */
12469 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12470 }
12471
12472 static void
12473 do_neon_dyadic_if_i_d (void)
12474 {
12475 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12476 affected if we specify unsigned args. */
12477 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12478 }
12479
12480 enum vfp_or_neon_is_neon_bits
12481 {
12482 NEON_CHECK_CC = 1,
12483 NEON_CHECK_ARCH = 2
12484 };
12485
12486 /* Call this function when an instruction which may have belonged to either
12487    the VFP or Neon instruction sets has turned out to be a Neon instruction
12488    (due to the operand types involved, etc.).  We have to check and/or fix up
12489    a couple of things:
12490
12491 - Make sure the user hasn't attempted to make a Neon instruction
12492 conditional.
12493 - Alter the value in the condition code field if necessary.
12494 - Make sure that the arch supports Neon instructions.
12495
12496 Which of these operations take place depends on bits from enum
12497 vfp_or_neon_is_neon_bits.
12498
12499 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12500 current instruction's condition is COND_ALWAYS, the condition field is
12501 changed to inst.uncond_value. This is necessary because instructions shared
12502 between VFP and Neon may be conditional for the VFP variants only, and the
12503 unconditional Neon version must have, e.g., 0xF in the condition field. */
12504
12505 static int
12506 vfp_or_neon_is_neon (unsigned check)
12507 {
12508 /* Conditions are always legal in Thumb mode (IT blocks). */
12509 if (!thumb_mode && (check & NEON_CHECK_CC))
12510 {
12511 if (inst.cond != COND_ALWAYS)
12512 {
12513 first_error (_(BAD_COND));
12514 return FAIL;
12515 }
12516 if (inst.uncond_value != -1)
12517 inst.instruction |= inst.uncond_value << 28;
12518 }
12519
12520 if ((check & NEON_CHECK_ARCH)
12521 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
12522 {
12523 first_error (_(BAD_FPU));
12524 return FAIL;
12525 }
12526
12527 return SUCCESS;
12528 }
12529
12530 static void
12531 do_neon_addsub_if_i (void)
12532 {
12533 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
12534 return;
12535
12536 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12537 return;
12538
12539 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12540 affected if we specify unsigned args. */
12541 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12542 }
12543
12544 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12545 result to be:
12546 V<op> A,B (A is operand 0, B is operand 2)
12547 to mean:
12548 V<op> A,B,A
12549 not:
12550 V<op> A,B,B
12551 so handle that case specially. */
12552
12553 static void
12554 neon_exchange_operands (void)
12555 {
12556 void *scratch = alloca (sizeof (inst.operands[0]));
12557 if (inst.operands[1].present)
12558 {
12559 /* Swap operands[1] and operands[2]. */
12560 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12561 inst.operands[1] = inst.operands[2];
12562 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12563 }
12564 else
12565 {
12566 inst.operands[1] = inst.operands[2];
12567 inst.operands[2] = inst.operands[0];
12568 }
12569 }
12570
12571 static void
12572 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12573 {
12574 if (inst.operands[2].isreg)
12575 {
12576 if (invert)
12577 neon_exchange_operands ();
12578 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12579 }
12580 else
12581 {
12582 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12583 struct neon_type_el et = neon_check_type (2, rs,
12584 N_EQK | N_SIZ, immtypes | N_KEY);
12585
12586 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12587 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12588 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12589 inst.instruction |= LOW4 (inst.operands[1].reg);
12590 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12591 inst.instruction |= neon_quad (rs) << 6;
12592 inst.instruction |= (et.type == NT_float) << 10;
12593 inst.instruction |= neon_logbits (et.size) << 18;
12594
12595 inst.instruction = neon_dp_fixup (inst.instruction);
12596 }
12597 }
12598
12599 static void
12600 do_neon_cmp (void)
12601 {
12602 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12603 }
12604
12605 static void
12606 do_neon_cmp_inv (void)
12607 {
12608 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12609 }
12610
12611 static void
12612 do_neon_ceq (void)
12613 {
12614 neon_compare (N_IF_32, N_IF_32, FALSE);
12615 }
12616
12617 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12618 scalars, which are encoded in 5 bits, M : Rm.
12619 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12620 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12621 index in M. */
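/* For example, the scalar D5[2] with a 16-bit element size encodes as 0x15:
   Rm[2:0] == 5, Rm[3] == 0 and M == 1, giving index M:Rm[3] == 0b10 == 2.  */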
12622
12623 static unsigned
12624 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12625 {
12626 unsigned regno = NEON_SCALAR_REG (scalar);
12627 unsigned elno = NEON_SCALAR_INDEX (scalar);
12628
12629 switch (elsize)
12630 {
12631 case 16:
12632 if (regno > 7 || elno > 3)
12633 goto bad_scalar;
12634 return regno | (elno << 3);
12635
12636 case 32:
12637 if (regno > 15 || elno > 1)
12638 goto bad_scalar;
12639 return regno | (elno << 4);
12640
12641 default:
12642 bad_scalar:
12643 first_error (_("scalar out of range for multiply instruction"));
12644 }
12645
12646 return 0;
12647 }
12648
12649 /* Encode multiply / multiply-accumulate scalar instructions. */
12650
12651 static void
12652 neon_mul_mac (struct neon_type_el et, int ubit)
12653 {
12654 unsigned scalar;
12655
12656 /* Give a more helpful error message if we have an invalid type. */
12657 if (et.type == NT_invtype)
12658 return;
12659
12660 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12661 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12662 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12663 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12664 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12665 inst.instruction |= LOW4 (scalar);
12666 inst.instruction |= HI1 (scalar) << 5;
12667 inst.instruction |= (et.type == NT_float) << 8;
12668 inst.instruction |= neon_logbits (et.size) << 20;
12669 inst.instruction |= (ubit != 0) << 24;
12670
12671 inst.instruction = neon_dp_fixup (inst.instruction);
12672 }
12673
12674 static void
12675 do_neon_mac_maybe_scalar (void)
12676 {
12677 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12678 return;
12679
12680 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12681 return;
12682
12683 if (inst.operands[2].isscalar)
12684 {
12685 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12686 struct neon_type_el et = neon_check_type (3, rs,
12687 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12688 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12689 neon_mul_mac (et, neon_quad (rs));
12690 }
12691 else
12692 {
12693 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12694 affected if we specify unsigned args. */
12695 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12696 }
12697 }
12698
12699 static void
12700 do_neon_tst (void)
12701 {
12702 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12703 struct neon_type_el et = neon_check_type (3, rs,
12704 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12705 neon_three_same (neon_quad (rs), 0, et.size);
12706 }
12707
12708 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12709 same types as the MAC equivalents. The polynomial type for this instruction
12710 is encoded the same as the integer type. */
12711
12712 static void
12713 do_neon_mul (void)
12714 {
12715 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12716 return;
12717
12718 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12719 return;
12720
12721 if (inst.operands[2].isscalar)
12722 do_neon_mac_maybe_scalar ();
12723 else
12724 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12725 }
12726
12727 static void
12728 do_neon_qdmulh (void)
12729 {
12730 if (inst.operands[2].isscalar)
12731 {
12732 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12733 struct neon_type_el et = neon_check_type (3, rs,
12734 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12735 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12736 neon_mul_mac (et, neon_quad (rs));
12737 }
12738 else
12739 {
12740 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12741 struct neon_type_el et = neon_check_type (3, rs,
12742 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12743 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12744 /* The U bit (rounding) comes from bit mask. */
12745 neon_three_same (neon_quad (rs), 0, et.size);
12746 }
12747 }
12748
12749 static void
12750 do_neon_fcmp_absolute (void)
12751 {
12752 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12753 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12754 /* Size field comes from bit mask. */
12755 neon_three_same (neon_quad (rs), 1, -1);
12756 }
12757
12758 static void
12759 do_neon_fcmp_absolute_inv (void)
12760 {
12761 neon_exchange_operands ();
12762 do_neon_fcmp_absolute ();
12763 }
12764
12765 static void
12766 do_neon_step (void)
12767 {
12768 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12769 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12770 neon_three_same (neon_quad (rs), 0, -1);
12771 }
12772
12773 static void
12774 do_neon_abs_neg (void)
12775 {
12776 enum neon_shape rs;
12777 struct neon_type_el et;
12778
12779 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12780 return;
12781
12782 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12783 return;
12784
12785 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12786 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12787
12788 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12789 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12790 inst.instruction |= LOW4 (inst.operands[1].reg);
12791 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12792 inst.instruction |= neon_quad (rs) << 6;
12793 inst.instruction |= (et.type == NT_float) << 10;
12794 inst.instruction |= neon_logbits (et.size) << 18;
12795
12796 inst.instruction = neon_dp_fixup (inst.instruction);
12797 }
12798
12799 static void
12800 do_neon_sli (void)
12801 {
12802 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12803 struct neon_type_el et = neon_check_type (2, rs,
12804 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12805 int imm = inst.operands[2].imm;
12806 constraint (imm < 0 || (unsigned)imm >= et.size,
12807 _("immediate out of range for insert"));
12808 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12809 }
12810
12811 static void
12812 do_neon_sri (void)
12813 {
12814 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12815 struct neon_type_el et = neon_check_type (2, rs,
12816 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12817 int imm = inst.operands[2].imm;
12818 constraint (imm < 1 || (unsigned)imm > et.size,
12819 _("immediate out of range for insert"));
12820 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12821 }
12822
12823 static void
12824 do_neon_qshlu_imm (void)
12825 {
12826 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12827 struct neon_type_el et = neon_check_type (2, rs,
12828 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12829 int imm = inst.operands[2].imm;
12830 constraint (imm < 0 || (unsigned)imm >= et.size,
12831 _("immediate out of range for shift"));
12832 /* Only encodes the 'U present' variant of the instruction.
12833 In this case, signed types have OP (bit 8) set to 0.
12834 Unsigned types have OP set to 1. */
12835 inst.instruction |= (et.type == NT_unsigned) << 8;
12836 /* The rest of the bits are the same as other immediate shifts. */
12837 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12838 }
12839
12840 static void
12841 do_neon_qmovn (void)
12842 {
12843 struct neon_type_el et = neon_check_type (2, NS_DQ,
12844 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12845 /* Saturating move where operands can be signed or unsigned, and the
12846 destination has the same signedness. */
12847 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12848 if (et.type == NT_unsigned)
12849 inst.instruction |= 0xc0;
12850 else
12851 inst.instruction |= 0x80;
12852 neon_two_same (0, 1, et.size / 2);
12853 }
12854
12855 static void
12856 do_neon_qmovun (void)
12857 {
12858 struct neon_type_el et = neon_check_type (2, NS_DQ,
12859 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12860 /* Saturating move with unsigned results. Operands must be signed. */
12861 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12862 neon_two_same (0, 1, et.size / 2);
12863 }
12864
12865 static void
12866 do_neon_rshift_sat_narrow (void)
12867 {
12868 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12869 or unsigned. If operands are unsigned, results must also be unsigned. */
12870 struct neon_type_el et = neon_check_type (2, NS_DQI,
12871 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12872 int imm = inst.operands[2].imm;
12873 /* This gets the bounds check, size encoding and immediate bits calculation
12874 right. */
12875 et.size /= 2;
12876
12877 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12878 VQMOVN.I<size> <Dd>, <Qm>. */
12879 if (imm == 0)
12880 {
12881 inst.operands[2].present = 0;
12882 inst.instruction = N_MNEM_vqmovn;
12883 do_neon_qmovn ();
12884 return;
12885 }
12886
12887 constraint (imm < 1 || (unsigned)imm > et.size,
12888 _("immediate out of range"));
12889 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12890 }
12891
12892 static void
12893 do_neon_rshift_sat_narrow_u (void)
12894 {
12895 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12896 or unsigned. If operands are unsigned, results must also be unsigned. */
12897 struct neon_type_el et = neon_check_type (2, NS_DQI,
12898 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12899 int imm = inst.operands[2].imm;
12900 /* This gets the bounds check, size encoding and immediate bits calculation
12901 right. */
12902 et.size /= 2;
12903
12904 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12905 VQMOVUN.I<size> <Dd>, <Qm>. */
12906 if (imm == 0)
12907 {
12908 inst.operands[2].present = 0;
12909 inst.instruction = N_MNEM_vqmovun;
12910 do_neon_qmovun ();
12911 return;
12912 }
12913
12914 constraint (imm < 1 || (unsigned)imm > et.size,
12915 _("immediate out of range"));
12916 /* FIXME: The manual is kind of unclear about what value U should have in
12917 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12918 must be 1. */
12919 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12920 }
12921
12922 static void
12923 do_neon_movn (void)
12924 {
12925 struct neon_type_el et = neon_check_type (2, NS_DQ,
12926 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12927 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12928 neon_two_same (0, 1, et.size / 2);
12929 }
12930
12931 static void
12932 do_neon_rshift_narrow (void)
12933 {
12934 struct neon_type_el et = neon_check_type (2, NS_DQI,
12935 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12936 int imm = inst.operands[2].imm;
12937 /* This gets the bounds check, size encoding and immediate bits calculation
12938 right. */
12939 et.size /= 2;
12940
12941   /* If the immediate is zero then this is a pseudo-instruction for
12942      VMOVN.I<size> <Dd>, <Qm>.  */
12943 if (imm == 0)
12944 {
12945 inst.operands[2].present = 0;
12946 inst.instruction = N_MNEM_vmovn;
12947 do_neon_movn ();
12948 return;
12949 }
12950
12951 constraint (imm < 1 || (unsigned)imm > et.size,
12952 _("immediate out of range for narrowing operation"));
12953 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12954 }
12955
12956 static void
12957 do_neon_shll (void)
12958 {
12959 /* FIXME: Type checking when lengthening. */
12960 struct neon_type_el et = neon_check_type (2, NS_QDI,
12961 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12962 unsigned imm = inst.operands[2].imm;
12963
12964 if (imm == et.size)
12965 {
12966 /* Maximum shift variant. */
12967 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12968 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12969 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12970 inst.instruction |= LOW4 (inst.operands[1].reg);
12971 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12972 inst.instruction |= neon_logbits (et.size) << 18;
12973
12974 inst.instruction = neon_dp_fixup (inst.instruction);
12975 }
12976 else
12977 {
12978 /* A more-specific type check for non-max versions. */
12979 et = neon_check_type (2, NS_QDI,
12980 N_EQK | N_DBL, N_SU_32 | N_KEY);
12981 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12982 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12983 }
12984 }
12985
12986 /* Check the various types for the VCVT instruction, and return which version
12987 the current instruction is. */
12988
12989 static int
12990 neon_cvt_flavour (enum neon_shape rs)
12991 {
12992 #define CVT_VAR(C,X,Y) \
12993 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12994 if (et.type != NT_invtype) \
12995 { \
12996 inst.error = NULL; \
12997 return (C); \
12998 }
12999 struct neon_type_el et;
13000 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
13001 || rs == NS_FF) ? N_VFP : 0;
13002 /* The instruction versions which take an immediate take one register
13003 argument, which is extended to the width of the full register. Thus the
13004 "source" and "destination" registers must have the same width. Hack that
13005 here by making the size equal to the key (wider, in this case) operand. */
13006 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
13007
13008 CVT_VAR (0, N_S32, N_F32);
13009 CVT_VAR (1, N_U32, N_F32);
13010 CVT_VAR (2, N_F32, N_S32);
13011 CVT_VAR (3, N_F32, N_U32);
13012 /* Half-precision conversions. */
13013 CVT_VAR (4, N_F32, N_F16);
13014 CVT_VAR (5, N_F16, N_F32);
13015
13016 whole_reg = N_VFP;
13017
13018 /* VFP instructions. */
13019 CVT_VAR (6, N_F32, N_F64);
13020 CVT_VAR (7, N_F64, N_F32);
13021 CVT_VAR (8, N_S32, N_F64 | key);
13022 CVT_VAR (9, N_U32, N_F64 | key);
13023 CVT_VAR (10, N_F64 | key, N_S32);
13024 CVT_VAR (11, N_F64 | key, N_U32);
13025 /* VFP instructions with bitshift. */
13026 CVT_VAR (12, N_F32 | key, N_S16);
13027 CVT_VAR (13, N_F32 | key, N_U16);
13028 CVT_VAR (14, N_F64 | key, N_S16);
13029 CVT_VAR (15, N_F64 | key, N_U16);
13030 CVT_VAR (16, N_S16, N_F32 | key);
13031 CVT_VAR (17, N_U16, N_F32 | key);
13032 CVT_VAR (18, N_S16, N_F64 | key);
13033 CVT_VAR (19, N_U16, N_F64 | key);
13034
13035 return -1;
13036 #undef CVT_VAR
13037 }
13038
13039 /* Neon-syntax VFP conversions. */
13040
13041 static void
13042 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13043 {
13044 const char *opname = 0;
13045
13046 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13047 {
13048 /* Conversions with immediate bitshift. */
13049 const char *enc[] =
13050 {
13051 "ftosls",
13052 "ftouls",
13053 "fsltos",
13054 "fultos",
13055 NULL,
13056 NULL,
13057 NULL,
13058 NULL,
13059 "ftosld",
13060 "ftould",
13061 "fsltod",
13062 "fultod",
13063 "fshtos",
13064 "fuhtos",
13065 "fshtod",
13066 "fuhtod",
13067 "ftoshs",
13068 "ftouhs",
13069 "ftoshd",
13070 "ftouhd"
13071 };
13072
13073 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13074 {
13075 opname = enc[flavour];
13076 constraint (inst.operands[0].reg != inst.operands[1].reg,
13077 _("operands 0 and 1 must be the same register"));
13078 inst.operands[1] = inst.operands[2];
13079 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13080 }
13081 }
13082 else
13083 {
13084 /* Conversions without bitshift. */
13085 const char *enc[] =
13086 {
13087 "ftosis",
13088 "ftouis",
13089 "fsitos",
13090 "fuitos",
13091 	  NULL,
13092 	  NULL,
13093 "fcvtsd",
13094 "fcvtds",
13095 "ftosid",
13096 "ftouid",
13097 "fsitod",
13098 "fuitod"
13099 };
13100
13101 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13102 opname = enc[flavour];
13103 }
13104
13105 if (opname)
13106 do_vfp_nsyn_opcode (opname);
13107 }
13108
13109 static void
13110 do_vfp_nsyn_cvtz (void)
13111 {
13112 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
13113 int flavour = neon_cvt_flavour (rs);
13114 const char *enc[] =
13115 {
13116 "ftosizs",
13117 "ftouizs",
13118 NULL,
13119 NULL,
13120 NULL,
13121 NULL,
13122 NULL,
13123 NULL,
13124 "ftosizd",
13125 "ftouizd"
13126 };
13127
13128 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
13129 do_vfp_nsyn_opcode (enc[flavour]);
13130 }
13131
13132 static void
13133 do_neon_cvt (void)
13134 {
13135 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
13136 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
13137 int flavour = neon_cvt_flavour (rs);
13138
13139 /* VFP rather than Neon conversions. */
13140 if (flavour >= 6)
13141 {
13142 do_vfp_nsyn_cvt (rs, flavour);
13143 return;
13144 }
13145
13146 switch (rs)
13147 {
13148 case NS_DDI:
13149 case NS_QQI:
13150 {
13151 unsigned immbits;
13152 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13153
13154 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13155 return;
13156
13157 /* Fixed-point conversion with #0 immediate is encoded as an
13158 integer conversion. */
13159 if (inst.operands[2].present && inst.operands[2].imm == 0)
13160 goto int_encode;
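	/* For 32-bit elements the architectural imm6 field (bits [21:16])
	   holds 64 - <fbits>.  We write 32 - <fbits> here and set bit 21
	   below, which together give 64 - <fbits>.  */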
13161 immbits = 32 - inst.operands[2].imm;
13162 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13163 if (flavour != -1)
13164 inst.instruction |= enctab[flavour];
13165 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13166 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13167 inst.instruction |= LOW4 (inst.operands[1].reg);
13168 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13169 inst.instruction |= neon_quad (rs) << 6;
13170 inst.instruction |= 1 << 21;
13171 inst.instruction |= immbits << 16;
13172
13173 inst.instruction = neon_dp_fixup (inst.instruction);
13174 }
13175 break;
13176
13177 case NS_DD:
13178 case NS_QQ:
13179 int_encode:
13180 {
13181 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
13182
13183 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13184
13185 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13186 return;
13187
13188 if (flavour != -1)
13189 inst.instruction |= enctab[flavour];
13190
13191 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13192 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13193 inst.instruction |= LOW4 (inst.operands[1].reg);
13194 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13195 inst.instruction |= neon_quad (rs) << 6;
13196 inst.instruction |= 2 << 18;
13197
13198 inst.instruction = neon_dp_fixup (inst.instruction);
13199 }
13200 break;
13201
13202 /* Half-precision conversions for Advanced SIMD -- neon. */
13203 case NS_QD:
13204 case NS_DQ:
13205
13206 if ((rs == NS_DQ)
13207 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
13208 {
13209 as_bad (_("operand size must match register width"));
13210 break;
13211 }
13212
13213 if ((rs == NS_QD)
13214 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
13215 {
13216 as_bad (_("operand size must match register width"));
13217 break;
13218 }
13219
13220 if (rs == NS_DQ)
13221 inst.instruction = 0x3b60600;
13222 else
13223 inst.instruction = 0x3b60700;
13224
13225 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13226 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13227 inst.instruction |= LOW4 (inst.operands[1].reg);
13228 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13229 inst.instruction = neon_dp_fixup (inst.instruction);
13230 break;
13231
13232 default:
13233 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13234 do_vfp_nsyn_cvt (rs, flavour);
13235 }
13236 }
13237
13238 static void
13239 do_neon_cvtb (void)
13240 {
13241 inst.instruction = 0xeb20a40;
13242
13243 /* The sizes are attached to the mnemonic. */
13244 if (inst.vectype.el[0].type != NT_invtype
13245 && inst.vectype.el[0].size == 16)
13246 inst.instruction |= 0x00010000;
13247
13248 /* Programmer's syntax: the sizes are attached to the operands. */
13249 else if (inst.operands[0].vectype.type != NT_invtype
13250 && inst.operands[0].vectype.size == 16)
13251 inst.instruction |= 0x00010000;
13252
13253 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
13254 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
13255 do_vfp_cond_or_thumb ();
13256 }
13257
13258
13259 static void
13260 do_neon_cvtt (void)
13261 {
13262 do_neon_cvtb ();
13263 inst.instruction |= 0x80;
13264 }
13265
13266 static void
13267 neon_move_immediate (void)
13268 {
13269 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
13270 struct neon_type_el et = neon_check_type (2, rs,
13271 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13272 unsigned immlo, immhi = 0, immbits;
13273 int op, cmode, float_p;
13274
13275 constraint (et.type == NT_invtype,
13276 _("operand size must be specified for immediate VMOV"));
13277
13278 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
13279 op = (inst.instruction & (1 << 5)) != 0;
13280
13281 immlo = inst.operands[1].imm;
13282 if (inst.operands[1].regisimm)
13283 immhi = inst.operands[1].reg;
13284
13285 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
13286 _("immediate has bits set outside the operand size"));
13287
13288 float_p = inst.operands[1].immisfloat;
13289
13290 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
13291 et.size, et.type)) == FAIL)
13292 {
13293 /* Invert relevant bits only. */
13294 neon_invert_size (&immlo, &immhi, et.size);
13295 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
13296 with one or the other; those cases are caught by
13297 neon_cmode_for_move_imm. */
13298 op = !op;
13299 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
13300 &op, et.size, et.type)) == FAIL)
13301 {
13302 first_error (_("immediate out of range"));
13303 return;
13304 }
13305 }
13306
13307 inst.instruction &= ~(1 << 5);
13308 inst.instruction |= op << 5;
13309
13310 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13311 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13312 inst.instruction |= neon_quad (rs) << 6;
13313 inst.instruction |= cmode << 8;
13314
13315 neon_write_immbits (immbits);
13316 }
13317
13318 static void
13319 do_neon_mvn (void)
13320 {
13321 if (inst.operands[1].isreg)
13322 {
13323 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13324
13325 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13326 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13327 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13328 inst.instruction |= LOW4 (inst.operands[1].reg);
13329 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13330 inst.instruction |= neon_quad (rs) << 6;
13331 }
13332 else
13333 {
13334 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13335 neon_move_immediate ();
13336 }
13337
13338 inst.instruction = neon_dp_fixup (inst.instruction);
13339 }
13340
13341 /* Encode instructions of form:
13342
13343 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13344 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13345
13346 static void
13347 neon_mixed_length (struct neon_type_el et, unsigned size)
13348 {
13349 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13350 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13351 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13352 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13353 inst.instruction |= LOW4 (inst.operands[2].reg);
13354 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13355 inst.instruction |= (et.type == NT_unsigned) << 24;
13356 inst.instruction |= neon_logbits (size) << 20;
13357
13358 inst.instruction = neon_dp_fixup (inst.instruction);
13359 }
13360
13361 static void
13362 do_neon_dyadic_long (void)
13363 {
13364 /* FIXME: Type checking for lengthening op. */
13365 struct neon_type_el et = neon_check_type (3, NS_QDD,
13366 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
13367 neon_mixed_length (et, et.size);
13368 }
13369
13370 static void
13371 do_neon_abal (void)
13372 {
13373 struct neon_type_el et = neon_check_type (3, NS_QDD,
13374 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
13375 neon_mixed_length (et, et.size);
13376 }
13377
13378 static void
13379 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
13380 {
13381 if (inst.operands[2].isscalar)
13382 {
13383 struct neon_type_el et = neon_check_type (3, NS_QDS,
13384 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
13385 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13386 neon_mul_mac (et, et.type == NT_unsigned);
13387 }
13388 else
13389 {
13390 struct neon_type_el et = neon_check_type (3, NS_QDD,
13391 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
13392 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13393 neon_mixed_length (et, et.size);
13394 }
13395 }
13396
13397 static void
13398 do_neon_mac_maybe_scalar_long (void)
13399 {
13400 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
13401 }
13402
13403 static void
13404 do_neon_dyadic_wide (void)
13405 {
13406 struct neon_type_el et = neon_check_type (3, NS_QQD,
13407 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
13408 neon_mixed_length (et, et.size);
13409 }
13410
13411 static void
13412 do_neon_dyadic_narrow (void)
13413 {
13414 struct neon_type_el et = neon_check_type (3, NS_QDD,
13415 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
13416 /* Operand sign is unimportant, and the U bit is part of the opcode,
13417 so force the operand type to integer. */
13418 et.type = NT_integer;
13419 neon_mixed_length (et, et.size / 2);
13420 }
13421
13422 static void
13423 do_neon_mul_sat_scalar_long (void)
13424 {
13425 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
13426 }
13427
13428 static void
13429 do_neon_vmull (void)
13430 {
13431 if (inst.operands[2].isscalar)
13432 do_neon_mac_maybe_scalar_long ();
13433 else
13434 {
13435 struct neon_type_el et = neon_check_type (3, NS_QDD,
13436 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
13437 if (et.type == NT_poly)
13438 inst.instruction = NEON_ENC_POLY (inst.instruction);
13439 else
13440 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13441 /* For polynomial encoding, size field must be 0b00 and the U bit must be
13442 zero. Should be OK as-is. */
13443 neon_mixed_length (et, et.size);
13444 }
13445 }
13446
13447 static void
13448 do_neon_ext (void)
13449 {
13450 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
13451 struct neon_type_el et = neon_check_type (3, rs,
13452 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13453 unsigned imm = (inst.operands[3].imm * et.size) / 8;
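  /* For example, "vext.16 d0, d1, d2, #2" gives a byte offset of
     2 * 16 / 8 == 4, which is placed in bits [11:8] below.  */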
13454
13455 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
13456 _("shift out of range"));
13457 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13458 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13459 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13460 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13461 inst.instruction |= LOW4 (inst.operands[2].reg);
13462 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13463 inst.instruction |= neon_quad (rs) << 6;
13464 inst.instruction |= imm << 8;
13465
13466 inst.instruction = neon_dp_fixup (inst.instruction);
13467 }
13468
13469 static void
13470 do_neon_rev (void)
13471 {
13472 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13473 struct neon_type_el et = neon_check_type (2, rs,
13474 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13475 unsigned op = (inst.instruction >> 7) & 3;
13476 /* N (width of reversed regions) is encoded as part of the bitmask. We
13477 extract it here to check the elements to be reversed are smaller.
13478 Otherwise we'd get a reserved instruction. */
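  /* For example, "vrev32.8 d0, d1" (op == 1, elsize == 32, et.size == 8) is
     accepted, whereas "vrev32.32 d0, d1" is rejected by the constraint
     below.  */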
13479 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
13480 assert (elsize != 0);
13481 constraint (et.size >= elsize,
13482 _("elements must be smaller than reversal region"));
13483 neon_two_same (neon_quad (rs), 1, et.size);
13484 }
13485
13486 static void
13487 do_neon_dup (void)
13488 {
13489 if (inst.operands[1].isscalar)
13490 {
13491 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
13492 struct neon_type_el et = neon_check_type (2, rs,
13493 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13494 unsigned sizebits = et.size >> 3;
13495 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
13496 int logsize = neon_logbits (et.size);
13497 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
13498
13499 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
13500 return;
13501
13502 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13503 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13504 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13505 inst.instruction |= LOW4 (dm);
13506 inst.instruction |= HI1 (dm) << 5;
13507 inst.instruction |= neon_quad (rs) << 6;
13508 inst.instruction |= x << 17;
13509 inst.instruction |= sizebits << 16;
13510
13511 inst.instruction = neon_dp_fixup (inst.instruction);
13512 }
13513 else
13514 {
13515 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
13516 struct neon_type_el et = neon_check_type (2, rs,
13517 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13518 /* Duplicate ARM register to lanes of vector. */
13519 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
13520 switch (et.size)
13521 {
13522 case 8: inst.instruction |= 0x400000; break;
13523 case 16: inst.instruction |= 0x000020; break;
13524 case 32: inst.instruction |= 0x000000; break;
13525 default: break;
13526 }
13527 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13528 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
13529 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
13530 inst.instruction |= neon_quad (rs) << 21;
13531 /* The encoding for this instruction is identical for the ARM and Thumb
13532 variants, except for the condition field. */
13533 do_vfp_cond_or_thumb ();
13534 }
13535 }
13536
13537 /* VMOV has particularly many variations. It can be one of:
13538 0. VMOV<c><q> <Qd>, <Qm>
13539 1. VMOV<c><q> <Dd>, <Dm>
13540 (Register operations, which are VORR with Rm = Rn.)
13541 2. VMOV<c><q>.<dt> <Qd>, #<imm>
13542 3. VMOV<c><q>.<dt> <Dd>, #<imm>
13543 (Immediate loads.)
13544 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
13545 (ARM register to scalar.)
13546 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
13547 (Two ARM registers to vector.)
13548 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
13549 (Scalar to ARM register.)
13550 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
13551 (Vector to two ARM registers.)
13552 8. VMOV.F32 <Sd>, <Sm>
13553 9. VMOV.F64 <Dd>, <Dm>
13554 (VFP register moves.)
13555 10. VMOV.F32 <Sd>, #imm
13556 11. VMOV.F64 <Dd>, #imm
13557 (VFP float immediate load.)
13558 12. VMOV <Rd>, <Sm>
13559 (VFP single to ARM reg.)
13560 13. VMOV <Sd>, <Rm>
13561 (ARM reg to VFP single.)
13562 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
13563 (Two ARM regs to two VFP singles.)
13564 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
13565 (Two VFP singles to two ARM regs.)
13566
13567 These cases can be disambiguated using neon_select_shape, except cases 1/9
13568 and 3/11 which depend on the operand type too.
13569
13570 All the encoded bits are hardcoded by this function.
13571
13572 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
13573 Cases 5, 7 may be used with VFPv2 and above.
13574
13575 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
13576 can specify a type where it doesn't make sense to, and is ignored). */
13577
13578 static void
13579 do_neon_mov (void)
13580 {
13581 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
13582 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
13583 NS_NULL);
13584 struct neon_type_el et;
13585 const char *ldconst = 0;
13586
13587 switch (rs)
13588 {
13589 case NS_DD: /* case 1/9. */
13590 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13591 /* It is not an error here if no type is given. */
13592 inst.error = NULL;
13593 if (et.type == NT_float && et.size == 64)
13594 {
13595 do_vfp_nsyn_opcode ("fcpyd");
13596 break;
13597 }
13598 /* fall through. */
13599
13600 case NS_QQ: /* case 0/1. */
13601 {
13602 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13603 return;
13604 /* The architecture manual I have doesn't explicitly state which
13605 value the U bit should have for register->register moves, but
13606 the equivalent VORR instruction has U = 0, so do that. */
13607 inst.instruction = 0x0200110;
13608 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13609 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13610 inst.instruction |= LOW4 (inst.operands[1].reg);
13611 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13612 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13613 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13614 inst.instruction |= neon_quad (rs) << 6;
13615
13616 inst.instruction = neon_dp_fixup (inst.instruction);
13617 }
13618 break;
13619
13620 case NS_DI: /* case 3/11. */
13621 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13622 inst.error = NULL;
13623 if (et.type == NT_float && et.size == 64)
13624 {
13625 /* case 11 (fconstd). */
13626 ldconst = "fconstd";
13627 goto encode_fconstd;
13628 }
13629 /* fall through. */
13630
13631 case NS_QI: /* case 2/3. */
13632 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13633 return;
13634 inst.instruction = 0x0800010;
13635 neon_move_immediate ();
13636 inst.instruction = neon_dp_fixup (inst.instruction);
13637 break;
13638
13639 case NS_SR: /* case 4. */
13640 {
13641 unsigned bcdebits = 0;
13642 struct neon_type_el et = neon_check_type (2, NS_NULL,
13643 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13644 int logsize = neon_logbits (et.size);
13645 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13646 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13647
13648 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13649 _(BAD_FPU));
13650 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13651 && et.size != 32, _(BAD_FPU));
13652 constraint (et.type == NT_invtype, _("bad type for scalar"));
13653 constraint (x >= 64 / et.size, _("scalar index out of range"));
13654
13655 switch (et.size)
13656 {
13657 case 8: bcdebits = 0x8; break;
13658 case 16: bcdebits = 0x1; break;
13659 case 32: bcdebits = 0x0; break;
13660 default: ;
13661 }
13662
13663 bcdebits |= x << logsize;
13664
13665 inst.instruction = 0xe000b10;
13666 do_vfp_cond_or_thumb ();
13667 inst.instruction |= LOW4 (dn) << 16;
13668 inst.instruction |= HI1 (dn) << 7;
13669 inst.instruction |= inst.operands[1].reg << 12;
13670 inst.instruction |= (bcdebits & 3) << 5;
13671 inst.instruction |= (bcdebits >> 2) << 21;
13672 }
13673 break;
13674
13675 case NS_DRR: /* case 5 (fmdrr). */
13676 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13677 _(BAD_FPU));
13678
13679 inst.instruction = 0xc400b10;
13680 do_vfp_cond_or_thumb ();
13681 inst.instruction |= LOW4 (inst.operands[0].reg);
13682 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13683 inst.instruction |= inst.operands[1].reg << 12;
13684 inst.instruction |= inst.operands[2].reg << 16;
13685 break;
13686
13687 case NS_RS: /* case 6. */
13688 {
13689 struct neon_type_el et = neon_check_type (2, NS_NULL,
13690 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13691 unsigned logsize = neon_logbits (et.size);
13692 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13693 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13694 unsigned abcdebits = 0;
13695
13696 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13697 _(BAD_FPU));
13698 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13699 && et.size != 32, _(BAD_FPU));
13700 constraint (et.type == NT_invtype, _("bad type for scalar"));
13701 constraint (x >= 64 / et.size, _("scalar index out of range"));
13702
13703 switch (et.size)
13704 {
13705 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13706 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13707 case 32: abcdebits = 0x00; break;
13708 default: ;
13709 }
13710
13711 abcdebits |= x << logsize;
13712 inst.instruction = 0xe100b10;
13713 do_vfp_cond_or_thumb ();
13714 inst.instruction |= LOW4 (dn) << 16;
13715 inst.instruction |= HI1 (dn) << 7;
13716 inst.instruction |= inst.operands[0].reg << 12;
13717 inst.instruction |= (abcdebits & 3) << 5;
13718 inst.instruction |= (abcdebits >> 2) << 21;
13719 }
13720 break;
13721
13722 case NS_RRD: /* case 7 (fmrrd). */
13723 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13724 _(BAD_FPU));
13725
13726 inst.instruction = 0xc500b10;
13727 do_vfp_cond_or_thumb ();
13728 inst.instruction |= inst.operands[0].reg << 12;
13729 inst.instruction |= inst.operands[1].reg << 16;
13730 inst.instruction |= LOW4 (inst.operands[2].reg);
13731 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13732 break;
13733
13734 case NS_FF: /* case 8 (fcpys). */
13735 do_vfp_nsyn_opcode ("fcpys");
13736 break;
13737
13738 case NS_FI: /* case 10 (fconsts). */
13739 ldconst = "fconsts";
13740 encode_fconstd:
13741 if (is_quarter_float (inst.operands[1].imm))
13742 {
13743 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13744 do_vfp_nsyn_opcode (ldconst);
13745 }
13746 else
13747 first_error (_("immediate out of range"));
13748 break;
13749
13750 case NS_RF: /* case 12 (fmrs). */
13751 do_vfp_nsyn_opcode ("fmrs");
13752 break;
13753
13754 case NS_FR: /* case 13 (fmsr). */
13755 do_vfp_nsyn_opcode ("fmsr");
13756 break;
13757
13758 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13759 (one of which is a list), but we have parsed four. Do some fiddling to
13760 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13761 expect. */
13762 case NS_RRFF: /* case 14 (fmrrs). */
13763 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13764 _("VFP registers must be adjacent"));
13765 inst.operands[2].imm = 2;
13766 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13767 do_vfp_nsyn_opcode ("fmrrs");
13768 break;
13769
13770 case NS_FFRR: /* case 15 (fmsrr). */
13771 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13772 _("VFP registers must be adjacent"));
13773 inst.operands[1] = inst.operands[2];
13774 inst.operands[2] = inst.operands[3];
13775 inst.operands[0].imm = 2;
13776 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13777 do_vfp_nsyn_opcode ("fmsrr");
13778 break;
13779
13780 default:
13781 abort ();
13782 }
13783 }
13784
13785 static void
13786 do_neon_rshift_round_imm (void)
13787 {
13788 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13789 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13790 int imm = inst.operands[2].imm;
13791
13792 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13793 if (imm == 0)
13794 {
13795 inst.operands[2].present = 0;
13796 do_neon_mov ();
13797 return;
13798 }
13799
13800 constraint (imm < 1 || (unsigned)imm > et.size,
13801 _("immediate out of range for shift"));
13802 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13803 et.size - imm);
13804 }
13805
13806 static void
13807 do_neon_movl (void)
13808 {
13809 struct neon_type_el et = neon_check_type (2, NS_QD,
13810 N_EQK | N_DBL, N_SU_32 | N_KEY);
13811 unsigned sizebits = et.size >> 3;
13812 inst.instruction |= sizebits << 19;
13813 neon_two_same (0, et.type == NT_unsigned, -1);
13814 }
13815
13816 static void
13817 do_neon_trn (void)
13818 {
13819 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13820 struct neon_type_el et = neon_check_type (2, rs,
13821 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13822 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13823 neon_two_same (neon_quad (rs), 1, et.size);
13824 }
13825
13826 static void
13827 do_neon_zip_uzp (void)
13828 {
13829 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13830 struct neon_type_el et = neon_check_type (2, rs,
13831 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13832 if (rs == NS_DD && et.size == 32)
13833 {
13834 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13835 inst.instruction = N_MNEM_vtrn;
13836 do_neon_trn ();
13837 return;
13838 }
13839 neon_two_same (neon_quad (rs), 1, et.size);
13840 }
13841
13842 static void
13843 do_neon_sat_abs_neg (void)
13844 {
13845 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13846 struct neon_type_el et = neon_check_type (2, rs,
13847 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13848 neon_two_same (neon_quad (rs), 1, et.size);
13849 }
13850
13851 static void
13852 do_neon_pair_long (void)
13853 {
13854 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13855 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13856   /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
13857 inst.instruction |= (et.type == NT_unsigned) << 7;
13858 neon_two_same (neon_quad (rs), 1, et.size);
13859 }
13860
13861 static void
13862 do_neon_recip_est (void)
13863 {
13864 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13865 struct neon_type_el et = neon_check_type (2, rs,
13866 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13867 inst.instruction |= (et.type == NT_float) << 8;
13868 neon_two_same (neon_quad (rs), 1, et.size);
13869 }
13870
13871 static void
13872 do_neon_cls (void)
13873 {
13874 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13875 struct neon_type_el et = neon_check_type (2, rs,
13876 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13877 neon_two_same (neon_quad (rs), 1, et.size);
13878 }
13879
13880 static void
13881 do_neon_clz (void)
13882 {
13883 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13884 struct neon_type_el et = neon_check_type (2, rs,
13885 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13886 neon_two_same (neon_quad (rs), 1, et.size);
13887 }
13888
13889 static void
13890 do_neon_cnt (void)
13891 {
13892 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13893 struct neon_type_el et = neon_check_type (2, rs,
13894 N_EQK | N_INT, N_8 | N_KEY);
13895 neon_two_same (neon_quad (rs), 1, et.size);
13896 }
13897
13898 static void
13899 do_neon_swp (void)
13900 {
13901 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13902 neon_two_same (neon_quad (rs), 1, -1);
13903 }
13904
13905 static void
13906 do_neon_tbl_tbx (void)
13907 {
13908 unsigned listlenbits;
13909 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13910
13911 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13912 {
13913 first_error (_("bad list length for table lookup"));
13914 return;
13915 }
13916
13917 listlenbits = inst.operands[1].imm - 1;
13918 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13919 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13920 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13921 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13922 inst.instruction |= LOW4 (inst.operands[2].reg);
13923 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13924 inst.instruction |= listlenbits << 8;
13925
13926 inst.instruction = neon_dp_fixup (inst.instruction);
13927 }
13928
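/* Encode VLDM / VSTM.  Single-precision lists are handed off to the VFP
   path; for double-precision lists the offset field counts words, hence the
   factor of two below.  E.g. (illustrative) "vldmia r0!, {d0-d3}" has
   imm == 4 and offsetbits == 8.  */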
13929 static void
13930 do_neon_ldm_stm (void)
13931 {
13932 /* P, U and L bits are part of bitmask. */
13933 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13934 unsigned offsetbits = inst.operands[1].imm * 2;
13935
13936 if (inst.operands[1].issingle)
13937 {
13938 do_vfp_nsyn_ldm_stm (is_dbmode);
13939 return;
13940 }
13941
13942 constraint (is_dbmode && !inst.operands[0].writeback,
13943 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13944
13945 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13946 _("register list must contain at least 1 and at most 16 "
13947 "registers"));
13948
13949 inst.instruction |= inst.operands[0].reg << 16;
13950 inst.instruction |= inst.operands[0].writeback << 21;
13951 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13952 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13953
13954 inst.instruction |= offsetbits;
13955
13956 do_vfp_cond_or_thumb ();
13957 }
13958
13959 static void
13960 do_neon_ldr_str (void)
13961 {
13962 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13963
13964 if (inst.operands[0].issingle)
13965 {
13966 if (is_ldr)
13967 do_vfp_nsyn_opcode ("flds");
13968 else
13969 do_vfp_nsyn_opcode ("fsts");
13970 }
13971 else
13972 {
13973 if (is_ldr)
13974 do_vfp_nsyn_opcode ("fldd");
13975 else
13976 do_vfp_nsyn_opcode ("fstd");
13977 }
13978 }
13979
13980 /* The "interleave" version also handles non-interleaving register VLD1/VST1
13981 instructions. */
13982
13983 static void
13984 do_neon_ld_st_interleave (void)
13985 {
13986 struct neon_type_el et = neon_check_type (1, NS_NULL,
13987 N_8 | N_16 | N_32 | N_64);
13988 unsigned alignbits = 0;
13989 unsigned idx;
13990 /* The bits in this table go:
13991 0: register stride of one (0) or two (1)
13992 1,2: register list length, minus one (1, 2, 3, 4).
13993 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13994 We use -1 for invalid entries. */
13995 const int typetable[] =
13996 {
13997 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13998 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13999 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14000 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14001 };
14002 int typebits;
14003
14004 if (et.type == NT_invtype)
14005 return;
14006
14007 if (inst.operands[1].immisalign)
14008 switch (inst.operands[1].imm >> 8)
14009 {
14010 case 64: alignbits = 1; break;
14011 case 128:
14012 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14013 goto bad_alignment;
14014 alignbits = 2;
14015 break;
14016 case 256:
14017 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14018 goto bad_alignment;
14019 alignbits = 3;
14020 break;
14021 default:
14022 bad_alignment:
14023 first_error (_("bad alignment"));
14024 return;
14025 }
14026
14027 inst.instruction |= alignbits << 4;
14028 inst.instruction |= neon_logbits (et.size) << 6;
14029
14030 /* Bits [4:6] of the immediate in a list specifier encode register stride
14031 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14032 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14033 up the right value for "type" in a table based on this value and the given
14034 list style, then stick it back. */
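  /* For example (illustrative): "vld2.16 {d0,d1}, [r0]" has a list length of
     2 and a stride of 1, so the low three bits of idx are 0b010; <n> == 2
     puts 1 in bits [4:3], giving idx == 10 and typetable[10] == 0x8.  */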
14035 idx = ((inst.operands[0].imm >> 4) & 7)
14036 | (((inst.instruction >> 8) & 3) << 3);
14037
14038 typebits = typetable[idx];
14039
14040 constraint (typebits == -1, _("bad list type for instruction"));
14041
14042 inst.instruction &= ~0xf00;
14043 inst.instruction |= typebits << 8;
14044 }
14045
14046 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14047 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14048 otherwise. The variable arguments are a list of pairs of legal (size, align)
14049 values, terminated with -1. */
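/* For example (illustrative), the VLD1/VST1 lane case below passes the pairs
   (16, 16) and (32, 32): a ":16" alignment qualifier is accepted for 16-bit
   elements, ":32" for 32-bit elements, and anything else is rejected.  */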
14050
14051 static int
14052 neon_alignment_bit (int size, int align, int *do_align, ...)
14053 {
14054 va_list ap;
14055 int result = FAIL, thissize, thisalign;
14056
14057 if (!inst.operands[1].immisalign)
14058 {
14059 *do_align = 0;
14060 return SUCCESS;
14061 }
14062
14063 va_start (ap, do_align);
14064
14065 do
14066 {
14067 thissize = va_arg (ap, int);
14068 if (thissize == -1)
14069 break;
14070 thisalign = va_arg (ap, int);
14071
14072 if (size == thissize && align == thisalign)
14073 result = SUCCESS;
14074 }
14075 while (result != SUCCESS);
14076
14077 va_end (ap);
14078
14079 if (result == SUCCESS)
14080 *do_align = 1;
14081 else
14082 first_error (_("unsupported alignment for instruction"));
14083
14084 return result;
14085 }
14086
14087 static void
14088 do_neon_ld_st_lane (void)
14089 {
14090 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14091 int align_good, do_align = 0;
14092 int logsize = neon_logbits (et.size);
14093 int align = inst.operands[1].imm >> 8;
14094 int n = (inst.instruction >> 8) & 3;
14095 int max_el = 64 / et.size;
14096
14097 if (et.type == NT_invtype)
14098 return;
14099
14100 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
14101 _("bad list length"));
14102 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
14103 _("scalar index out of range"));
14104 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
14105 && et.size == 8,
14106 _("stride of 2 unavailable when element size is 8"));
14107
14108 switch (n)
14109 {
14110 case 0: /* VLD1 / VST1. */
14111 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
14112 32, 32, -1);
14113 if (align_good == FAIL)
14114 return;
14115 if (do_align)
14116 {
14117 unsigned alignbits = 0;
14118 switch (et.size)
14119 {
14120 case 16: alignbits = 0x1; break;
14121 case 32: alignbits = 0x3; break;
14122 default: ;
14123 }
14124 inst.instruction |= alignbits << 4;
14125 }
14126 break;
14127
14128 case 1: /* VLD2 / VST2. */
14129 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
14130 32, 64, -1);
14131 if (align_good == FAIL)
14132 return;
14133 if (do_align)
14134 inst.instruction |= 1 << 4;
14135 break;
14136
14137 case 2: /* VLD3 / VST3. */
14138 constraint (inst.operands[1].immisalign,
14139 _("can't use alignment with this instruction"));
14140 break;
14141
14142 case 3: /* VLD4 / VST4. */
14143 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14144 16, 64, 32, 64, 32, 128, -1);
14145 if (align_good == FAIL)
14146 return;
14147 if (do_align)
14148 {
14149 unsigned alignbits = 0;
14150 switch (et.size)
14151 {
14152 case 8: alignbits = 0x1; break;
14153 case 16: alignbits = 0x1; break;
14154 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
14155 default: ;
14156 }
14157 inst.instruction |= alignbits << 4;
14158 }
14159 break;
14160
14161 default: ;
14162 }
14163
14164 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
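  /* E.g. (illustrative): "vld2.16 {d0[2],d2[2]}, [r0]" has a register stride
     of 2 and logsize == 1, so the statement below sets bit 5.  */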
14165 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14166 inst.instruction |= 1 << (4 + logsize);
14167
14168 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
14169 inst.instruction |= logsize << 10;
14170 }
14171
14172 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14173
14174 static void
14175 do_neon_ld_dup (void)
14176 {
14177 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14178 int align_good, do_align = 0;
14179
14180 if (et.type == NT_invtype)
14181 return;
14182
14183 switch ((inst.instruction >> 8) & 3)
14184 {
14185 case 0: /* VLD1. */
14186 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
14187 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14188 &do_align, 16, 16, 32, 32, -1);
14189 if (align_good == FAIL)
14190 return;
14191 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
14192 {
14193 case 1: break;
14194 case 2: inst.instruction |= 1 << 5; break;
14195 default: first_error (_("bad list length")); return;
14196 }
14197 inst.instruction |= neon_logbits (et.size) << 6;
14198 break;
14199
14200 case 1: /* VLD2. */
14201 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14202 &do_align, 8, 16, 16, 32, 32, 64, -1);
14203 if (align_good == FAIL)
14204 return;
14205 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
14206 _("bad list length"));
14207 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14208 inst.instruction |= 1 << 5;
14209 inst.instruction |= neon_logbits (et.size) << 6;
14210 break;
14211
14212 case 2: /* VLD3. */
14213 constraint (inst.operands[1].immisalign,
14214 _("can't use alignment with this instruction"));
14215 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
14216 _("bad list length"));
14217 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14218 inst.instruction |= 1 << 5;
14219 inst.instruction |= neon_logbits (et.size) << 6;
14220 break;
14221
14222 case 3: /* VLD4. */
14223 {
14224 int align = inst.operands[1].imm >> 8;
14225 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14226 16, 64, 32, 64, 32, 128, -1);
14227 if (align_good == FAIL)
14228 return;
14229 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
14230 _("bad list length"));
14231 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14232 inst.instruction |= 1 << 5;
14233 if (et.size == 32 && align == 128)
14234 inst.instruction |= 0x3 << 6;
14235 else
14236 inst.instruction |= neon_logbits (et.size) << 6;
14237 }
14238 break;
14239
14240 default: ;
14241 }
14242
14243 inst.instruction |= do_align << 4;
14244 }
14245
14246 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
14247    apart from bits [11:4]).  */
14248
14249 static void
14250 do_neon_ldx_stx (void)
14251 {
14252 switch (NEON_LANE (inst.operands[0].imm))
14253 {
14254 case NEON_INTERLEAVE_LANES:
14255 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
14256 do_neon_ld_st_interleave ();
14257 break;
14258
14259 case NEON_ALL_LANES:
14260 inst.instruction = NEON_ENC_DUP (inst.instruction);
14261 do_neon_ld_dup ();
14262 break;
14263
14264 default:
14265 inst.instruction = NEON_ENC_LANE (inst.instruction);
14266 do_neon_ld_st_lane ();
14267 }
14268
14269 /* L bit comes from bit mask. */
14270 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14271 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14272 inst.instruction |= inst.operands[1].reg << 16;
14273
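  /* The Rm field selects the addressing form: an ordinary register for
     post-indexing, 0xd (the SP encoding) for writeback by the transfer size,
     or 0xf (the PC encoding) for no writeback.  */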
14274 if (inst.operands[1].postind)
14275 {
14276 int postreg = inst.operands[1].imm & 0xf;
14277 constraint (!inst.operands[1].immisreg,
14278 _("post-index must be a register"));
14279 constraint (postreg == 0xd || postreg == 0xf,
14280 _("bad register for post-index"));
14281 inst.instruction |= postreg;
14282 }
14283 else if (inst.operands[1].writeback)
14284 {
14285 inst.instruction |= 0xd;
14286 }
14287 else
14288 inst.instruction |= 0xf;
14289
14290 if (thumb_mode)
14291 inst.instruction |= 0xf9000000;
14292 else
14293 inst.instruction |= 0xf4000000;
14294 }
14295 \f
14296 /* Overall per-instruction processing. */
14297
14298 /* We need to be able to fix up arbitrary expressions in some statements.
14299 This is so that we can handle symbols that are an arbitrary distance from
14300 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14301 which returns part of an address in a form which will be valid for
14302 a data instruction. We do this by pushing the expression into a symbol
14303 in the expr_section, and creating a fix for that. */
14304
14305 static void
14306 fix_new_arm (fragS * frag,
14307 int where,
14308 short int size,
14309 expressionS * exp,
14310 int pc_rel,
14311 int reloc)
14312 {
14313 fixS * new_fix;
14314
14315 switch (exp->X_op)
14316 {
14317 case O_constant:
14318 case O_symbol:
14319 case O_add:
14320 case O_subtract:
14321 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
14322 break;
14323
14324 default:
14325 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
14326 pc_rel, reloc);
14327 break;
14328 }
14329
14330 /* Mark whether the fix is to a THUMB instruction, or an ARM
14331 instruction. */
14332 new_fix->tc_fix_data = thumb_mode;
14333 }
14334
14335 /* Create a frag for an instruction requiring relaxation.  */
14336 static void
14337 output_relax_insn (void)
14338 {
14339 char * to;
14340 symbolS *sym;
14341 int offset;
14342
14343 /* The size of the instruction is unknown, so tie the debug info to the
14344 start of the instruction. */
14345 dwarf2_emit_insn (0);
14346
14347 switch (inst.reloc.exp.X_op)
14348 {
14349 case O_symbol:
14350 sym = inst.reloc.exp.X_add_symbol;
14351 offset = inst.reloc.exp.X_add_number;
14352 break;
14353 case O_constant:
14354 sym = NULL;
14355 offset = inst.reloc.exp.X_add_number;
14356 break;
14357 default:
14358 sym = make_expr_symbol (&inst.reloc.exp);
14359 offset = 0;
14360 break;
14361 }
14362 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
14363 inst.relax, sym, offset, NULL/*offset, opcode*/);
14364 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
14365 }
14366
14367 /* Write a 32-bit thumb instruction to buf. */
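/* The instruction is stored as two consecutive 16-bit halfwords, most
   significant halfword first, each in the target's byte order.  */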
14368 static void
14369 put_thumb32_insn (char * buf, unsigned long insn)
14370 {
14371 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
14372 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
14373 }
14374
14375 static void
14376 output_inst (const char * str)
14377 {
14378 char * to = NULL;
14379
14380 if (inst.error)
14381 {
14382 as_bad ("%s -- `%s'", inst.error, str);
14383 return;
14384 }
14385 if (inst.relax)
14386 {
14387 output_relax_insn ();
14388 return;
14389 }
14390 if (inst.size == 0)
14391 return;
14392
14393 to = frag_more (inst.size);
14394 /* PR 9814: Record the thumb mode into the current frag so that we know
14395 what type of NOP padding to use, if necessary. We override any previous
14396 setting so that if the mode has changed then the NOPS that we use will
14397 match the encoding of the last instruction in the frag. */
14398 frag_now->tc_frag_data = thumb_mode | MODE_RECORDED;
14399
14400 if (thumb_mode && (inst.size > THUMB_SIZE))
14401 {
14402 assert (inst.size == (2 * THUMB_SIZE));
14403 put_thumb32_insn (to, inst.instruction);
14404 }
14405 else if (inst.size > INSN_SIZE)
14406 {
14407 assert (inst.size == (2 * INSN_SIZE));
14408 md_number_to_chars (to, inst.instruction, INSN_SIZE);
14409 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
14410 }
14411 else
14412 md_number_to_chars (to, inst.instruction, inst.size);
14413
14414 if (inst.reloc.type != BFD_RELOC_UNUSED)
14415 fix_new_arm (frag_now, to - frag_now->fr_literal,
14416 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
14417 inst.reloc.type);
14418
14419 dwarf2_emit_insn (inst.size);
14420 }
14421
14422 /* Tag values used in struct asm_opcode's tag field. */
14423 enum opcode_tag
14424 {
14425 OT_unconditional, /* Instruction cannot be conditionalized.
14426 The ARM condition field is still 0xE. */
14427 OT_unconditionalF, /* Instruction cannot be conditionalized
14428 and carries 0xF in its ARM condition field. */
14429 OT_csuffix, /* Instruction takes a conditional suffix. */
14430 OT_csuffixF, /* Some forms of the instruction take a conditional
14431 suffix, others place 0xF where the condition field
14432 would be. */
14433 OT_cinfix3, /* Instruction takes a conditional infix,
14434 beginning at character index 3. (In
14435 unified mode, it becomes a suffix.) */
14436 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
14437 tsts, cmps, cmns, and teqs. */
14438 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
14439 character index 3, even in unified mode. Used for
14440 legacy instructions where suffix and infix forms
14441 may be ambiguous. */
14442 OT_csuf_or_in3, /* Instruction takes either a conditional
14443 suffix or an infix at character index 3. */
14444 OT_odd_infix_unc, /* This is the unconditional variant of an
14445 instruction that takes a conditional infix
14446 at an unusual position. In unified mode,
14447 this variant will accept a suffix. */
14448 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
14449 are the conditional variants of instructions that
14450 take conditional infixes in unusual positions.
14451 The infix appears at character index
14452 (tag - OT_odd_infix_0). These are not accepted
14453 in unified mode. */
14454 };
14455
14456 /* Subroutine of md_assemble, responsible for looking up the primary
14457 opcode from the mnemonic the user wrote. STR points to the
14458 beginning of the mnemonic.
14459
14460 This is not simply a hash table lookup, because of conditional
14461 variants. Most instructions have conditional variants, which are
14462 expressed with a _conditional affix_ to the mnemonic. If we were
14463 to encode each conditional variant as a literal string in the opcode
14464 table, it would have approximately 20,000 entries.
14465
14466 Most mnemonics take this affix as a suffix, and in unified syntax,
14467 'most' is upgraded to 'all'. However, in the divided syntax, some
14468 instructions take the affix as an infix, notably the s-variants of
14469 the arithmetic instructions. Of those instructions, all but six
14470 have the infix appear after the third character of the mnemonic.
14471
14472 Accordingly, the algorithm for looking up primary opcodes given
14473 an identifier is:
14474
14475 1. Look up the identifier in the opcode table.
14476 If we find a match, go to step U.
14477
14478 2. Look up the last two characters of the identifier in the
14479 conditions table. If we find a match, look up the first N-2
14480 characters of the identifier in the opcode table. If we
14481 find a match, go to step CE.
14482
14483 3. Look up the fourth and fifth characters of the identifier in
14484 the conditions table. If we find a match, extract those
14485 characters from the identifier, and look up the remaining
14486 characters in the opcode table. If we find a match, go
14487 to step CM.
14488
14489 4. Fail.
14490
14491 U. Examine the tag field of the opcode structure, in case this is
14492 one of the six instructions with its conditional infix in an
14493 unusual place. If it is, the tag tells us where to find the
14494 infix; look it up in the conditions table and set inst.cond
14495 accordingly. Otherwise, this is an unconditional instruction.
14496 Again set inst.cond accordingly. Return the opcode structure.
14497
14498 CE. Examine the tag field to make sure this is an instruction that
14499 should receive a conditional suffix. If it is not, fail.
14500 Otherwise, set inst.cond from the suffix we already looked up,
14501 and return the opcode structure.
14502
14503 CM. Examine the tag field to make sure this is an instruction that
14504 should receive a conditional infix after the third character.
14505 If it is not, fail. Otherwise, undo the edits to the current
14506 line of input and proceed as for case CE. */
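/* Illustrative examples of the algorithm above:

   "cmpeq"    - not in the opcode table as written; stripping the "eq" suffix
                leaves "cmp", which is found, so we take step CE.

   "addeqs"   - (divided syntax) "qs" is not a condition, so step 2 fails;
                the fourth and fifth characters "eq" are a condition and
                removing them leaves "adds", which is found, so step CM.

   "smulleqs" - generated literally in the table by the CM macro, so step 1
                finds it directly and step U recovers the "eq" infix from
                its tag.  */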
14507
14508 static const struct asm_opcode *
14509 opcode_lookup (char **str)
14510 {
14511 char *end, *base;
14512 char *affix;
14513 const struct asm_opcode *opcode;
14514 const struct asm_cond *cond;
14515 char save[2];
14516 bfd_boolean neon_supported;
14517
14518 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
14519
14520 /* Scan up to the end of the mnemonic, which must end in white space,
14521 '.' (in unified mode, or for Neon instructions), or end of string. */
14522 for (base = end = *str; *end != '\0'; end++)
14523 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
14524 break;
14525
14526 if (end == base)
14527 return 0;
14528
14529 /* Handle a possible width suffix and/or Neon type suffix. */
14530 if (end[0] == '.')
14531 {
14532 int offset = 2;
14533
14534 /* The .w and .n suffixes are only valid if the unified syntax is in
14535 use. */
14536 if (unified_syntax && end[1] == 'w')
14537 inst.size_req = 4;
14538 else if (unified_syntax && end[1] == 'n')
14539 inst.size_req = 2;
14540 else
14541 offset = 0;
14542
14543 inst.vectype.elems = 0;
14544
14545 *str = end + offset;
14546
14547 if (end[offset] == '.')
14548 {
14549 /* See if we have a Neon type suffix (possible in either unified or
14550 non-unified ARM syntax mode). */
14551 if (parse_neon_type (&inst.vectype, str) == FAIL)
14552 return 0;
14553 }
14554 else if (end[offset] != '\0' && end[offset] != ' ')
14555 return 0;
14556 }
14557 else
14558 *str = end;
14559
14560 /* Look for unaffixed or special-case affixed mnemonic. */
14561 opcode = hash_find_n (arm_ops_hsh, base, end - base);
14562 if (opcode)
14563 {
14564 /* step U */
14565 if (opcode->tag < OT_odd_infix_0)
14566 {
14567 inst.cond = COND_ALWAYS;
14568 return opcode;
14569 }
14570
14571 if (warn_on_deprecated && unified_syntax)
14572 as_warn (_("conditional infixes are deprecated in unified syntax"));
14573 affix = base + (opcode->tag - OT_odd_infix_0);
14574 cond = hash_find_n (arm_cond_hsh, affix, 2);
14575 assert (cond);
14576
14577 inst.cond = cond->value;
14578 return opcode;
14579 }
14580
14581 /* Cannot have a conditional suffix on a mnemonic of less than three
14582    characters (two for the suffix plus at least one for the base mnemonic).  */
14583 if (end - base < 3)
14584 return 0;
14585
14586 /* Look for suffixed mnemonic. */
14587 affix = end - 2;
14588 cond = hash_find_n (arm_cond_hsh, affix, 2);
14589 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
14590 if (opcode && cond)
14591 {
14592 /* step CE */
14593 switch (opcode->tag)
14594 {
14595 case OT_cinfix3_legacy:
14596 /* Ignore conditional suffixes matched on infix only mnemonics. */
14597 break;
14598
14599 case OT_cinfix3:
14600 case OT_cinfix3_deprecated:
14601 case OT_odd_infix_unc:
14602 if (!unified_syntax)
14603 return 0;
14604 /* else fall through */
14605
14606 case OT_csuffix:
14607 case OT_csuffixF:
14608 case OT_csuf_or_in3:
14609 inst.cond = cond->value;
14610 return opcode;
14611
14612 case OT_unconditional:
14613 case OT_unconditionalF:
14614 if (thumb_mode)
14615 {
14616 inst.cond = cond->value;
14617 }
14618 else
14619 {
14620 /* delayed diagnostic */
14621 inst.error = BAD_COND;
14622 inst.cond = COND_ALWAYS;
14623 }
14624 return opcode;
14625
14626 default:
14627 return 0;
14628 }
14629 }
14630
14631 /* Cannot have a usual-position infix on a mnemonic of less than
14632 six characters (five would be a suffix). */
14633 if (end - base < 6)
14634 return 0;
14635
14636 /* Look for infixed mnemonic in the usual position. */
14637 affix = base + 3;
14638 cond = hash_find_n (arm_cond_hsh, affix, 2);
14639 if (!cond)
14640 return 0;
14641
14642 memcpy (save, affix, 2);
14643 memmove (affix, affix + 2, (end - affix) - 2);
14644 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14645 memmove (affix + 2, affix, (end - affix) - 2);
14646 memcpy (affix, save, 2);
14647
14648 if (opcode
14649 && (opcode->tag == OT_cinfix3
14650 || opcode->tag == OT_cinfix3_deprecated
14651 || opcode->tag == OT_csuf_or_in3
14652 || opcode->tag == OT_cinfix3_legacy))
14653 {
14654 /* step CM */
14655 if (warn_on_deprecated && unified_syntax
14656 && (opcode->tag == OT_cinfix3
14657 || opcode->tag == OT_cinfix3_deprecated))
14658 as_warn (_("conditional infixes are deprecated in unified syntax"));
14659
14660 inst.cond = cond->value;
14661 return opcode;
14662 }
14663
14664 return 0;
14665 }
14666
14667 void
14668 md_assemble (char *str)
14669 {
14670 char *p = str;
14671 const struct asm_opcode * opcode;
14672
14673 /* Align the previous label if needed. */
14674 if (last_label_seen != NULL)
14675 {
14676 symbol_set_frag (last_label_seen, frag_now);
14677 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14678 S_SET_SEGMENT (last_label_seen, now_seg);
14679 }
14680
14681 memset (&inst, '\0', sizeof (inst));
14682 inst.reloc.type = BFD_RELOC_UNUSED;
14683
14684 opcode = opcode_lookup (&p);
14685 if (!opcode)
14686 {
14687 /* It wasn't an instruction, but it might be a register alias of
14688 the form alias .req reg, or a Neon .dn/.qn directive. */
14689 if (!create_register_alias (str, p)
14690 && !create_neon_reg_alias (str, p))
14691 as_bad (_("bad instruction `%s'"), str);
14692
14693 return;
14694 }
14695
14696 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
14697 as_warn (_("s suffix on comparison instruction is deprecated"));
14698
14699 /* The value which unconditional instructions should have in place of the
14700 condition field. */
14701 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14702
14703 if (thumb_mode)
14704 {
14705 arm_feature_set variant;
14706
14707 variant = cpu_variant;
14708 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14709 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14710 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14711 /* Check that this instruction is supported for this CPU. */
14712 if (!opcode->tvariant
14713 || (thumb_mode == 1
14714 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14715 {
14716 as_bad (_("selected processor does not support `%s'"), str);
14717 return;
14718 }
14719 if (inst.cond != COND_ALWAYS && !unified_syntax
14720 && opcode->tencode != do_t_branch)
14721 {
14722 as_bad (_("Thumb does not support conditional execution"));
14723 return;
14724 }
14725
14726 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
14727 {
14728 /* Implicitly require narrow instructions on Thumb-1.  This avoids
14729 relaxation accidentally introducing Thumb-2 instructions. */
14730 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
14731 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14732 inst.size_req = 2;
14733 }
14734
14735 /* Check conditional suffixes. */
14736 if (current_it_mask)
14737 {
14738 int cond;
14739 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14740 current_it_mask <<= 1;
14741 current_it_mask &= 0x1f;
14742 /* The BKPT instruction is unconditional even in an IT block. */
14743 if (!inst.error
14744 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14745 {
14746 as_bad (_("incorrect condition in IT block"));
14747 return;
14748 }
14749 }
14750 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14751 {
14752 as_bad (_("thumb conditional instruction not in IT block"));
14753 return;
14754 }
14755
14756 mapping_state (MAP_THUMB);
14757 inst.instruction = opcode->tvalue;
14758
14759 if (!parse_operands (p, opcode->operands))
14760 opcode->tencode ();
14761
14762 /* Clear current_it_mask at the end of an IT block. */
14763 if (current_it_mask == 0x10)
14764 current_it_mask = 0;
14765
14766 if (!(inst.error || inst.relax))
14767 {
14768 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14769 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14770 if (inst.size_req && inst.size_req != inst.size)
14771 {
14772 as_bad (_("cannot honor width suffix -- `%s'"), str);
14773 return;
14774 }
14775 }
14776
14777 /* Something has gone badly wrong if we try to relax a fixed size
14778 instruction. */
14779 assert (inst.size_req == 0 || !inst.relax);
14780
14781 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14782 *opcode->tvariant);
14783 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14784 set those bits when Thumb-2 32-bit instructions are seen, i.e.
14785 anything other than bl/blx and v6-M instructions.
14786 This is overly pessimistic for relaxable instructions. */
14787 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14788 || inst.relax)
14789 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14790 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14791 arm_ext_v6t2);
14792 }
14793 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14794 {
14795 bfd_boolean is_bx;
14796
14797 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
14798 is_bx = (opcode->aencode == do_bx);
14799
14800 /* Check that this instruction is supported for this CPU. */
14801 if (!(is_bx && fix_v4bx)
14802 && !(opcode->avariant &&
14803 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
14804 {
14805 as_bad (_("selected processor does not support `%s'"), str);
14806 return;
14807 }
14808 if (inst.size_req)
14809 {
14810 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14811 return;
14812 }
14813
14814 mapping_state (MAP_ARM);
14815 inst.instruction = opcode->avalue;
14816 if (opcode->tag == OT_unconditionalF)
14817 inst.instruction |= 0xF << 28;
14818 else
14819 inst.instruction |= inst.cond << 28;
14820 inst.size = INSN_SIZE;
14821 if (!parse_operands (p, opcode->operands))
14822 opcode->aencode ();
14823 /* Arm mode bx is marked as both v4T and v5 because it's still required
14824 on a hypothetical non-thumb v5 core. */
14825 if (is_bx)
14826 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14827 else
14828 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14829 *opcode->avariant);
14830 }
14831 else
14832 {
14833 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14834 "-- `%s'"), str);
14835 return;
14836 }
14837 output_inst (str);
14838 }
14839
14840 /* Various frobbings of labels and their addresses. */
14841
14842 void
14843 arm_start_line_hook (void)
14844 {
14845 last_label_seen = NULL;
14846 }
14847
14848 void
14849 arm_frob_label (symbolS * sym)
14850 {
14851 last_label_seen = sym;
14852
14853 ARM_SET_THUMB (sym, thumb_mode);
14854
14855 #if defined OBJ_COFF || defined OBJ_ELF
14856 ARM_SET_INTERWORK (sym, support_interwork);
14857 #endif
14858
14859 /* Note - do not allow local symbols (.Lxxx) to be labelled
14860 as Thumb functions. This is because these labels, whilst
14861 they exist inside Thumb code, are not the entry points for
14862 possible ARM->Thumb calls. Also, these labels can be used
14863 as part of a computed goto or switch statement; e.g. gcc
14864 can generate code that looks like this:
14865
14866 ldr r2, [pc, .Laaa]
14867 lsl r3, r3, #2
14868 ldr r2, [r3, r2]
14869 mov pc, r2
14870
14871 .Lbbb: .word .Lxxx
14872 .Lccc: .word .Lyyy
14873 ..etc...
14874 .Laaa: .word Lbbb
14875
14876 The first instruction loads the address of the jump table.
14877 The second instruction converts a table index into a byte offset.
14878 The third instruction gets the jump address out of the table.
14879 The fourth instruction performs the jump.
14880
14881 If the address stored at .Laaa is that of a symbol which has the
14882 Thumb_Func bit set, then the linker will arrange for this address
14883 to have the bottom bit set, which in turn would mean that the
14884 address computation performed by the third instruction would end
14885 up with the bottom bit set. Since the ARM is capable of unaligned
14886 word loads, the instruction would then load the incorrect address
14887 out of the jump table, and chaos would ensue. */
14888 if (label_is_thumb_function_name
14889 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14890 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14891 {
14892 /* When the address of a Thumb function is taken the bottom
14893 bit of that address should be set. This will allow
14894 interworking between Arm and Thumb functions to work
14895 correctly. */
14896
14897 THUMB_SET_FUNC (sym, 1);
14898
14899 label_is_thumb_function_name = FALSE;
14900 }
14901
14902 dwarf2_emit_label (sym);
14903 }
14904
14905 int
14906 arm_data_in_code (void)
14907 {
14908 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14909 {
14910 *input_line_pointer = '/';
14911 input_line_pointer += 5;
14912 *input_line_pointer = 0;
14913 return 1;
14914 }
14915
14916 return 0;
14917 }
14918
14919 char *
14920 arm_canonicalize_symbol_name (char * name)
14921 {
14922 int len;
14923
14924 if (thumb_mode && (len = strlen (name)) > 5
14925 && streq (name + len - 5, "/data"))
14926 *(name + len - 5) = 0;
14927
14928 return name;
14929 }
14930 \f
14931 /* Table of all register names defined by default. The user can
14932 define additional names with .req. Note that all register names
14933 should appear in both upper and lowercase variants. Some registers
14934 also have mixed-case names. */
14935
14936 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14937 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14938 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14939 #define REGSET(p,t) \
14940 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14941 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14942 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14943 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14944 #define REGSETH(p,t) \
14945 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14946 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14947 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14948 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14949 #define REGSET2(p,t) \
14950 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14951 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14952 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14953 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14954
14955 static const struct reg_entry reg_names[] =
14956 {
14957 /* ARM integer registers. */
14958 REGSET(r, RN), REGSET(R, RN),
14959
14960 /* ATPCS synonyms. */
14961 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14962 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14963 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14964
14965 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14966 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14967 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14968
14969 /* Well-known aliases. */
14970 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14971 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14972
14973 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14974 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14975
14976 /* Coprocessor numbers. */
14977 REGSET(p, CP), REGSET(P, CP),
14978
14979 /* Coprocessor register numbers. The "cr" variants are for backward
14980 compatibility. */
14981 REGSET(c, CN), REGSET(C, CN),
14982 REGSET(cr, CN), REGSET(CR, CN),
14983
14984 /* FPA registers. */
14985 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14986 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14987
14988 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14989 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14990
14991 /* VFP SP registers. */
14992 REGSET(s,VFS), REGSET(S,VFS),
14993 REGSETH(s,VFS), REGSETH(S,VFS),
14994
14995 /* VFP DP Registers. */
14996 REGSET(d,VFD), REGSET(D,VFD),
14997 /* Extra Neon DP registers. */
14998 REGSETH(d,VFD), REGSETH(D,VFD),
14999
15000 /* Neon QP registers. */
15001 REGSET2(q,NQ), REGSET2(Q,NQ),
15002
15003 /* VFP control registers. */
15004 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
15005 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
15006 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
15007 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
15008 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
15009 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
15010
15011 /* Maverick DSP coprocessor registers. */
15012 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
15013 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
15014
15015 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
15016 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
15017 REGDEF(dspsc,0,DSPSC),
15018
15019 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
15020 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
15021 REGDEF(DSPSC,0,DSPSC),
15022
15023 /* iWMMXt data registers - p0, c0-15. */
15024 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
15025
15026 /* iWMMXt control registers - p1, c0-3. */
15027 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
15028 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
15029 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
15030 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
15031
15032 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
15033 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
15034 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
15035 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
15036 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
15037
15038 /* XScale accumulator registers. */
15039 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
15040 };
15041 #undef REGDEF
15042 #undef REGNUM
15043 #undef REGSET
15044
15045 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
15046 within psr_required_here. */
15047 static const struct asm_psr psrs[] =
15048 {
15049 /* Backward compatibility notation. Note that "all" is no longer
15050 truly all possible PSR bits. */
15051 {"all", PSR_c | PSR_f},
15052 {"flg", PSR_f},
15053 {"ctl", PSR_c},
15054
15055 /* Individual flags. */
15056 {"f", PSR_f},
15057 {"c", PSR_c},
15058 {"x", PSR_x},
15059 {"s", PSR_s},
15060 /* Combinations of flags. */
15061 {"fs", PSR_f | PSR_s},
15062 {"fx", PSR_f | PSR_x},
15063 {"fc", PSR_f | PSR_c},
15064 {"sf", PSR_s | PSR_f},
15065 {"sx", PSR_s | PSR_x},
15066 {"sc", PSR_s | PSR_c},
15067 {"xf", PSR_x | PSR_f},
15068 {"xs", PSR_x | PSR_s},
15069 {"xc", PSR_x | PSR_c},
15070 {"cf", PSR_c | PSR_f},
15071 {"cs", PSR_c | PSR_s},
15072 {"cx", PSR_c | PSR_x},
15073 {"fsx", PSR_f | PSR_s | PSR_x},
15074 {"fsc", PSR_f | PSR_s | PSR_c},
15075 {"fxs", PSR_f | PSR_x | PSR_s},
15076 {"fxc", PSR_f | PSR_x | PSR_c},
15077 {"fcs", PSR_f | PSR_c | PSR_s},
15078 {"fcx", PSR_f | PSR_c | PSR_x},
15079 {"sfx", PSR_s | PSR_f | PSR_x},
15080 {"sfc", PSR_s | PSR_f | PSR_c},
15081 {"sxf", PSR_s | PSR_x | PSR_f},
15082 {"sxc", PSR_s | PSR_x | PSR_c},
15083 {"scf", PSR_s | PSR_c | PSR_f},
15084 {"scx", PSR_s | PSR_c | PSR_x},
15085 {"xfs", PSR_x | PSR_f | PSR_s},
15086 {"xfc", PSR_x | PSR_f | PSR_c},
15087 {"xsf", PSR_x | PSR_s | PSR_f},
15088 {"xsc", PSR_x | PSR_s | PSR_c},
15089 {"xcf", PSR_x | PSR_c | PSR_f},
15090 {"xcs", PSR_x | PSR_c | PSR_s},
15091 {"cfs", PSR_c | PSR_f | PSR_s},
15092 {"cfx", PSR_c | PSR_f | PSR_x},
15093 {"csf", PSR_c | PSR_s | PSR_f},
15094 {"csx", PSR_c | PSR_s | PSR_x},
15095 {"cxf", PSR_c | PSR_x | PSR_f},
15096 {"cxs", PSR_c | PSR_x | PSR_s},
15097 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
15098 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
15099 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
15100 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
15101 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
15102 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
15103 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
15104 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
15105 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
15106 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
15107 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
15108 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
15109 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
15110 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
15111 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
15112 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
15113 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
15114 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
15115 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
15116 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
15117 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
15118 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
15119 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
15120 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
15121 };
15122
15123 /* Table of V7M psr names. */
15124 static const struct asm_psr v7m_psrs[] =
15125 {
15126 {"apsr", 0 }, {"APSR", 0 },
15127 {"iapsr", 1 }, {"IAPSR", 1 },
15128 {"eapsr", 2 }, {"EAPSR", 2 },
15129 {"psr", 3 }, {"PSR", 3 },
15130 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
15131 {"ipsr", 5 }, {"IPSR", 5 },
15132 {"epsr", 6 }, {"EPSR", 6 },
15133 {"iepsr", 7 }, {"IEPSR", 7 },
15134 {"msp", 8 }, {"MSP", 8 },
15135 {"psp", 9 }, {"PSP", 9 },
15136 {"primask", 16}, {"PRIMASK", 16},
15137 {"basepri", 17}, {"BASEPRI", 17},
15138 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
15139 {"faultmask", 19}, {"FAULTMASK", 19},
15140 {"control", 20}, {"CONTROL", 20}
15141 };
15142
15143 /* Table of all shift-in-operand names. */
15144 static const struct asm_shift_name shift_names [] =
15145 {
15146 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
15147 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
15148 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
15149 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
15150 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
15151 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
15152 };
15153
15154 /* Table of all explicit relocation names. */
15155 #ifdef OBJ_ELF
15156 static struct reloc_entry reloc_names[] =
15157 {
15158 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
15159 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
15160 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
15161 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
15162 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
15163 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
15164 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
15165 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
15166 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
15167 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
15168 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
15169 };
15170 #endif
15171
15172 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
15173 static const struct asm_cond conds[] =
15174 {
15175 {"eq", 0x0},
15176 {"ne", 0x1},
15177 {"cs", 0x2}, {"hs", 0x2},
15178 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
15179 {"mi", 0x4},
15180 {"pl", 0x5},
15181 {"vs", 0x6},
15182 {"vc", 0x7},
15183 {"hi", 0x8},
15184 {"ls", 0x9},
15185 {"ge", 0xa},
15186 {"lt", 0xb},
15187 {"gt", 0xc},
15188 {"le", 0xd},
15189 {"al", 0xe}
15190 };
15191
15192 static struct asm_barrier_opt barrier_opt_names[] =
15193 {
15194 { "sy", 0xf },
15195 { "un", 0x7 },
15196 { "st", 0xe },
15197 { "unst", 0x6 }
15198 };
15199
15200 /* Table of ARM-format instructions. */
15201
15202 /* Macros for gluing together operand strings. N.B. In all cases
15203 other than OPS0, the trailing OP_stop comes from default
15204 zero-initialization of the unspecified elements of the array. */
15205 #define OPS0() { OP_stop, }
15206 #define OPS1(a) { OP_##a, }
15207 #define OPS2(a,b) { OP_##a,OP_##b, }
15208 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
15209 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
15210 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
15211 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
15212
15213 /* These macros abstract out the exact format of the mnemonic table and
15214 save some repeated characters. */
15215
15216 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
15217 #define TxCE(mnem, op, top, nops, ops, ae, te) \
15218 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
15219 THUMB_VARIANT, do_##ae, do_##te }
15220
15221 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
15222 a T_MNEM_xyz enumerator. */
15223 #define TCE(mnem, aop, top, nops, ops, ae, te) \
15224 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
15225 #define tCE(mnem, aop, top, nops, ops, ae, te) \
15226 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
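/* For example (illustrative), the entry
     tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c)
   expands via TxCE and OPS3 to
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }.  */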
15227
15228 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
15229 infix after the third character. */
15230 #define TxC3(mnem, op, top, nops, ops, ae, te) \
15231 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
15232 THUMB_VARIANT, do_##ae, do_##te }
15233 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
15234 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
15235 THUMB_VARIANT, do_##ae, do_##te }
15236 #define TC3(mnem, aop, top, nops, ops, ae, te) \
15237 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
15238 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
15239 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
15240 #define tC3(mnem, aop, top, nops, ops, ae, te) \
15241 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15242 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
15243 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15244
15245 /* Mnemonic with a conditional infix in an unusual place.  Each and every
15246    conditional variant has to appear explicitly in the opcode table.  */
15247 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
15248 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
15249 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
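/* Note: when M2 is empty, #m2 is "" and sizeof (#m2) == 1, so the entry gets
   the unconditional OT_odd_infix_unc tag; otherwise the tag records the
   infix position as the length of M1.  */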
15250
15251 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
15252 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
15253 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
15254 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
15255 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
15256 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
15257 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
15258 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
15259 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
15260 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
15261 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
15262 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
15263 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
15264 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
15265 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
15266 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
15267 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
15268 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
15269 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
15270 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
15271
15272 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
15273 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
15274 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
15275 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
15276
15277 /* Mnemonic that cannot be conditionalized. The ARM condition-code
15278 field is still 0xE. Many of the Thumb variants can be executed
15279 conditionally, so this is checked separately. */
15280 #define TUE(mnem, op, top, nops, ops, ae, te) \
15281 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
15282 THUMB_VARIANT, do_##ae, do_##te }
15283
15284 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
15285 condition code field. */
15286 #define TUF(mnem, op, top, nops, ops, ae, te) \
15287 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
15288 THUMB_VARIANT, do_##ae, do_##te }
15289
15290 /* ARM-only variants of all the above. */
15291 #define CE(mnem, op, nops, ops, ae) \
15292 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15293
15294 #define C3(mnem, op, nops, ops, ae) \
15295 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15296
15297 /* Legacy mnemonics that always have conditional infix after the third
15298 character. */
15299 #define CL(mnem, op, nops, ops, ae) \
15300 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
15301 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15302
15303 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
15304 #define cCE(mnem, op, nops, ops, ae) \
15305 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15306
15307 /* Legacy coprocessor instructions where conditional infix and conditional
15308 suffix are ambiguous. For consistency this includes all FPA instructions,
15309 not just the potentially ambiguous ones. */
15310 #define cCL(mnem, op, nops, ops, ae) \
15311 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
15312 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15313
15314 /* Coprocessor, takes either a suffix or a position-3 infix
15315 (for an FPA corner case). */
15316 #define C3E(mnem, op, nops, ops, ae) \
15317 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
15318 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15319
15320 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
15321 { #m1 #m2 #m3, OPS##nops ops, \
15322 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
15323 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15324
15325 #define CM(m1, m2, op, nops, ops, ae) \
15326 xCM_(m1, , m2, op, nops, ops, ae), \
15327 xCM_(m1, eq, m2, op, nops, ops, ae), \
15328 xCM_(m1, ne, m2, op, nops, ops, ae), \
15329 xCM_(m1, cs, m2, op, nops, ops, ae), \
15330 xCM_(m1, hs, m2, op, nops, ops, ae), \
15331 xCM_(m1, cc, m2, op, nops, ops, ae), \
15332 xCM_(m1, ul, m2, op, nops, ops, ae), \
15333 xCM_(m1, lo, m2, op, nops, ops, ae), \
15334 xCM_(m1, mi, m2, op, nops, ops, ae), \
15335 xCM_(m1, pl, m2, op, nops, ops, ae), \
15336 xCM_(m1, vs, m2, op, nops, ops, ae), \
15337 xCM_(m1, vc, m2, op, nops, ops, ae), \
15338 xCM_(m1, hi, m2, op, nops, ops, ae), \
15339 xCM_(m1, ls, m2, op, nops, ops, ae), \
15340 xCM_(m1, ge, m2, op, nops, ops, ae), \
15341 xCM_(m1, lt, m2, op, nops, ops, ae), \
15342 xCM_(m1, gt, m2, op, nops, ops, ae), \
15343 xCM_(m1, le, m2, op, nops, ops, ae), \
15344 xCM_(m1, al, m2, op, nops, ops, ae)
15345
15346 #define UE(mnem, op, nops, ops, ae) \
15347 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
15348
15349 #define UF(mnem, op, nops, ops, ae) \
15350 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
15351
15352 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
15353 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
15354 use the same encoding function for each. */
15355 #define NUF(mnem, op, nops, ops, enc) \
15356 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
15357 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15358
15359 /* Neon data processing, version which indirects through neon_enc_tab for
15360 the various overloaded versions of opcodes. */
15361 #define nUF(mnem, op, nops, ops, enc) \
15362 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
15363 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15364
15365 /* Neon insn with conditional suffix for the ARM version, non-overloaded
15366 version. */
15367 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
15368 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
15369 THUMB_VARIANT, do_##enc, do_##enc }
15370
15371 #define NCE(mnem, op, nops, ops, enc) \
15372 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
15373
15374 #define NCEF(mnem, op, nops, ops, enc) \
15375 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
15376
15377 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
15378 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
15379 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
15380 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15381
15382 #define nCE(mnem, op, nops, ops, enc) \
15383 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
15384
15385 #define nCEF(mnem, op, nops, ops, enc) \
15386 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
15387
15388 #define do_0 0
15389
15390 /* Thumb-only, unconditional. */
15391 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
15392
15393 static const struct asm_opcode insns[] =
15394 {
15395 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
15396 #define THUMB_VARIANT &arm_ext_v4t
15397 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
15398 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
15399 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
15400 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
15401 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
15402 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
15403 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
15404 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
15405 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
15406 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
15407 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
15408 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
15409 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
15410 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
15411 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
15412 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
15413
15414 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
15415 for setting PSR flag bits. They are obsolete in V6 and do not
15416 have Thumb equivalents. */
15417 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15418 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15419 CL(tstp, 110f000, 2, (RR, SH), cmp),
15420 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15421 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15422 CL(cmpp, 150f000, 2, (RR, SH), cmp),
15423 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15424 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15425 CL(cmnp, 170f000, 2, (RR, SH), cmp),
15426
15427 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
15428 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
15429 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
15430 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
15431
15432 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
15433 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15434 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
15435 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15436
15437 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15438 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15439 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15440 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15441 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15442 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15443
15444 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
15445 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
15446 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
15447 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
15448
15449 /* Pseudo ops. */
15450 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
15451 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
15452 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
15453
15454 /* Thumb-compatibility pseudo ops. */
15455 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
15456 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
15457 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
15458 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
15459 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
15460 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
15461 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
15462 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
15463 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
15464 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
15465 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
15466 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
15467
15468 /* These may simplify to neg. */
15469 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
15470 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
15471
15472 #undef THUMB_VARIANT
15473 #define THUMB_VARIANT &arm_ext_v6
15474 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
15475
15476 /* V1 instructions with no Thumb analogue prior to V6T2. */
15477 #undef THUMB_VARIANT
15478 #define THUMB_VARIANT &arm_ext_v6t2
15479 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15480 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15481 CL(teqp, 130f000, 2, (RR, SH), cmp),
15482
15483 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
15484 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
15485 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
15486 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
15487
15488 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15489 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15490
15491 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15492 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15493
15494 /* V1 instructions with no Thumb analogue at all. */
15495 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
15496 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
15497
15498 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
15499 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
15500 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
15501 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
15502 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
15503 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
15504 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
15505 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
15506
15507 #undef ARM_VARIANT
15508 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
15509 #undef THUMB_VARIANT
15510 #define THUMB_VARIANT &arm_ext_v4t
15511 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15512 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15513
15514 #undef THUMB_VARIANT
15515 #define THUMB_VARIANT &arm_ext_v6t2
15516 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15517 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
15518
15519 /* Generic coprocessor instructions. */
15520 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15521 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15522 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15523 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15524 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15525 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15526 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15527
15528 #undef ARM_VARIANT
15529 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
15530 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15531 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15532
15533 #undef ARM_VARIANT
15534 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
15535 #undef THUMB_VARIANT
15536 #define THUMB_VARIANT &arm_ext_msr
15537 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
15538 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
15539
15540 #undef ARM_VARIANT
15541 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
15542 #undef THUMB_VARIANT
15543 #define THUMB_VARIANT &arm_ext_v6t2
15544 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15545 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15546 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15547 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15548 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15549 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15550 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15551 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15552
15553 #undef ARM_VARIANT
15554 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
15555 #undef THUMB_VARIANT
15556 #define THUMB_VARIANT &arm_ext_v4t
15557 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15558 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15559 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15560 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15561 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15562 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15563
15564 #undef ARM_VARIANT
15565 #define ARM_VARIANT &arm_ext_v4t_5
15566 /* ARM Architecture 4T. */
15567 /* Note: bx (and blx) are required on V5, even if the processor does
15568 not support Thumb. */
15569 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
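 /* Editorial note (hedged): e.g. "bx lr" must assemble for any ARMv5
    target, Thumb-capable or not, which is why this entry is gated on
    arm_ext_v4t_5 rather than on a Thumb feature bit.  */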
15570
15571 #undef ARM_VARIANT
15572 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
15573 #undef THUMB_VARIANT
15574 #define THUMB_VARIANT &arm_ext_v5t
15575 /* Note: blx has 2 variants; the .value coded here is for
15576 BLX(2). Only this variant has conditional execution. */
15577 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
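 /* Editorial note (hedged): the register form, e.g. "blxne r3", uses
    the BLX(2) value above and may be conditional; the immediate form
    "blx label" is BLX(1), is always unconditional, and is given its
    encoding by the encoder functions rather than by this .value.  */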
15578 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
15579
15580 #undef THUMB_VARIANT
15581 #define THUMB_VARIANT &arm_ext_v6t2
15582 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
15583 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15584 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15585 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15586 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15587 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15588 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15589 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15590
15591 #undef ARM_VARIANT
15592 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
15593 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15594 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15595 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15596 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15597
15598 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15599 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15600
15601 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15602 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15603 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15604 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15605
15606 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15607 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15608 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15609 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15610
15611 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15612 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15613
15614 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15615 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15616 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15617 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15618
15619 #undef ARM_VARIANT
15620 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15621 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
15622 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15623 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15624
15625 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15626 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15627
15628 #undef ARM_VARIANT
15629 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15630 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
15631
15632 #undef ARM_VARIANT
15633 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15634 #undef THUMB_VARIANT
15635 #define THUMB_VARIANT &arm_ext_v6
15636 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
15637 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
15638 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15639 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15640 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15641 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15642 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15643 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15644 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15645 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15646
15647 #undef THUMB_VARIANT
15648 #define THUMB_VARIANT &arm_ext_v6t2
15649 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15650 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15651 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15652 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15653
15654 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15655 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15656
15657 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15658 #undef THUMB_VARIANT
15659 #define THUMB_VARIANT &arm_ext_v6_notm
15660 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15661 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15662 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15663 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15664 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15665 TCE(qasx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15666 /* Old name for QASX. */
15667 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15668 TCE(qsax, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15669 /* Old name for QSAX. */
15670 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15671 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15672 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15673 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15674 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15675 TCE(sasx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15676 /* Old name for SASX. */
15677 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15678 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15679 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15680 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15681 /* Old name for SHASX. */
15682 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15683 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15684 /* Old name for SHSAX. */
15685 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15686 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15687 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15688 TCE(ssax, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15689 /* Old name for SSAX. */
15690 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15691 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15692 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15693 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15694 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15695 TCE(uasx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15696 /* Old name for UASX. */
15697 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15698 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15699 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15700 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15701 /* Old name for UHASX. */
15702 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15703 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15704 /* Old name for UHSAX. */
15705 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15706 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15707 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15708 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15709 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15710 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15711 /* Old name for UQASX. */
15712 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15713 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15714 /* Old name for UQSAX. */
15715 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15716 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15717 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15718 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15719 TCE(usax, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15720 /* Old name for USAX. */
15721 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15722 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15723 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15724 UF(rfeib, 9900a00, 1, (RRw), rfe),
15725 UF(rfeda, 8100a00, 1, (RRw), rfe),
15726 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15727 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15728 UF(rfefa, 9900a00, 1, (RRw), rfe),
15729 UF(rfeea, 8100a00, 1, (RRw), rfe),
15730 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15731 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15732 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15733 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15734 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15735 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15736 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15737 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15738 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15739 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15740 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15741 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15742 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15743 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15744 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15745 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15746 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15747 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15748 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15749 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15750 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15751 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15752 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15753 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15754 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15755 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15756 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15757 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15758 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15759 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15760 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15761 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15762 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15763 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15764 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15765 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15766 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15767
15768 #undef ARM_VARIANT
15769 #define ARM_VARIANT &arm_ext_v6k
15770 #undef THUMB_VARIANT
15771 #define THUMB_VARIANT &arm_ext_v6k
15772 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15773 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15774 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15775 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15776
15777 #undef THUMB_VARIANT
15778 #define THUMB_VARIANT &arm_ext_v6_notm
15779 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15780 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15781
15782 #undef THUMB_VARIANT
15783 #define THUMB_VARIANT &arm_ext_v6t2
15784 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15785 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15786 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15787 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15788 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15789
15790 #undef ARM_VARIANT
15791 #define ARM_VARIANT &arm_ext_v6z
15792 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15793
15794 #undef ARM_VARIANT
15795 #define ARM_VARIANT &arm_ext_v6t2
15796 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15797 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15798 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15799 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15800
15801 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15802 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15803 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15804 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15805
15806 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15807 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15808 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15809 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15810
15811 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15812 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15813 /* ARM does not really have an IT instruction, so always allow it. */
15814 #undef ARM_VARIANT
15815 #define ARM_VARIANT &arm_ext_v1
15816 TUE(it, 0, bf08, 1, (COND), it, t_it),
15817 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15818 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15819 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15820 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15821 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15822 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15823 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15824 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15825 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15826 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15827 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15828 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15829 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15830 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
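 /* Editorial sketch (hedged): a typical Thumb-2 IT block is

        ite     eq
        moveq   r0, #1
        movne   r0, #0

    In ARM state every instruction is individually conditional, so the
    it* mnemonics above are accepted for all ARM variants rather than
    being tied to a particular architecture extension.  */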
15831 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
15832 TC3(rrx, 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
15833 TC3(rrxs, 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
15834
15835 /* Thumb2 only instructions. */
15836 #undef ARM_VARIANT
15837 #define ARM_VARIANT NULL
15838
15839 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15840 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15841 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
15842 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
15843 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15844 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15845
15846 /* Thumb-2 hardware division instructions (R and M profiles only). */
15847 #undef THUMB_VARIANT
15848 #define THUMB_VARIANT &arm_ext_div
15849 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15850 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15851
15852 /* ARM V6M/V7 instructions. */
15853 #undef ARM_VARIANT
15854 #define ARM_VARIANT &arm_ext_barrier
15855 #undef THUMB_VARIANT
15856 #define THUMB_VARIANT &arm_ext_barrier
15857 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15858 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15859 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15860
15861 /* ARM V7 instructions. */
15862 #undef ARM_VARIANT
15863 #define ARM_VARIANT &arm_ext_v7
15864 #undef THUMB_VARIANT
15865 #define THUMB_VARIANT &arm_ext_v7
15866 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15867 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15868
15869 #undef ARM_VARIANT
15870 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15871 cCE(wfs, e200110, 1, (RR), rd),
15872 cCE(rfs, e300110, 1, (RR), rd),
15873 cCE(wfc, e400110, 1, (RR), rd),
15874 cCE(rfc, e500110, 1, (RR), rd),
15875
15876 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15877 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15878 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15879 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15880
15881 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15882 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15883 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15884 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15885
15886 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15887 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15888 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15889 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15890 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15891 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15892 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15893 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15894 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15895 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15896 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15897 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15898
15899 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15900 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15901 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15902 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15903 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15904 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15905 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15906 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15907 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15908 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15909 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15910 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15911
15912 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15913 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15914 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15915 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15916 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15917 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15918 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15919 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15920 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15921 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15922 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15923 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15924
15925 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15926 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15927 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15928 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15929 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15930 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15931 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15932 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15933 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15934 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15935 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15936 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15937
15938 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15939 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15940 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15941 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15942 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15943 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15944 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15945 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15946 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15947 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15948 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15949 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15950
15951 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15952 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15953 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15954 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15955 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15956 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15957 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15958 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15959 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15960 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15961 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15962 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15963
15964 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15965 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15966 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15967 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15968 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15969 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15970 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15971 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15972 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15973 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15974 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15975 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15976
15977 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15978 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15979 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15980 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15981 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15982 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15983 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15984 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15985 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15986 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15987 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15988 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
15989
15990 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15991 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15992 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15993 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15994 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15995 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15996 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15997 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15998 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15999 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
16000 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
16001 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
16002
16003 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
16004 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
16005 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
16006 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
16007 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
16008 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
16009 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
16010 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
16011 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
16012 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
16013 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
16014 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
16015
16016 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
16017 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
16018 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
16019 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
16020 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
16021 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
16022 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
16023 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
16024 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
16025 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
16026 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
16027 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
16028
16029 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
16030 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
16031 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
16032 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
16033 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
16034 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
16035 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
16036 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
16037 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
16038 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
16039 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
16040 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
16041
16042 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
16043 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
16044 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
16045 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
16046 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
16047 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
16048 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
16049 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
16050 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
16051 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
16052 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
16053 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
16054
16055 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
16056 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
16057 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
16058 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
16059 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
16060 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
16061 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
16062 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
16063 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
16064 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
16065 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
16066 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
16067
16068 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
16069 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
16070 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
16071 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
16072 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
16073 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
16074 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
16075 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
16076 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
16077 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
16078 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
16079 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
16080
16081 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
16082 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
16083 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
16084 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
16085 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
16086 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
16087 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
16088 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
16089 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
16090 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
16091 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
16092 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
16093
16094 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
16095 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
16096 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
16097 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
16098 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
16099 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16100 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16101 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16102 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
16103 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
16104 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
16105 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
16106
16107 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
16108 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
16109 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
16110 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
16111 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
16112 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16113 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16114 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16115 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
16116 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
16117 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
16118 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
16119
16120 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
16121 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
16122 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
16123 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
16124 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
16125 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16126 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16127 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16128 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
16129 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
16130 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
16131 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
16132
16133 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
16134 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
16135 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
16136 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
16137 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
16138 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16139 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16140 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16141 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
16142 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
16143 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
16144 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
16145
16146 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
16147 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
16148 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
16149 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
16150 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
16151 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16152 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16153 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16154 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
16155 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
16156 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
16157 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
16158
16159 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
16160 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
16161 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
16162 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
16163 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
16164 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16165 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16166 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16167 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
16168 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
16169 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
16170 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
16171
16172 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
16173 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
16174 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
16175 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
16176 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
16177 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16178 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16179 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16180 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
16181 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
16182 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
16183 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
16184
16185 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
16186 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
16187 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
16188 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
16189 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
16190 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16191 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16192 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16193 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
16194 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
16195 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
16196 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
16197
16198 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
16199 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
16200 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
16201 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
16202 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
16203 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16204 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16205 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16206 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
16207 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
16208 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
16209 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
16210
16211 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
16212 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
16213 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
16214 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
16215 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
16216 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16217 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16218 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16219 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
16220 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
16221 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
16222 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
16223
16224 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16225 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16226 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16227 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16228 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16229 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16230 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16231 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16232 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16233 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16234 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16235 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16236
16237 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16238 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16239 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16240 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16241 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16242 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16243 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16244 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16245 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16246 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16247 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16248 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16249
16250 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16251 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16252 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16253 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16254 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16255 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16256 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16257 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16258 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16259 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16260 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16261 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16262
16263 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
16264 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
16265 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
16266 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
16267
16268 cCL(flts, e000110, 2, (RF, RR), rn_rd),
16269 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
16270 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
16271 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
16272 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
16273 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
16274 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
16275 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
16276 cCL(flte, e080110, 2, (RF, RR), rn_rd),
16277 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
16278 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
16279 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
16280
16281 /* The implementation of the FIX instruction is broken on some
16282 assemblers, in that it accepts a precision specifier as well as a
16283 rounding specifier, despite the fact that this is meaningless.
16284 To be more compatible, we accept it as well, though of course it
16285 does not set any bits. */
16286 cCE(fix, e100110, 2, (RR, RF), rd_rm),
16287 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
16288 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
16289 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
16290 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
16291 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
16292 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
16293 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
16294 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
16295 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
16296 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
16297 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
16298 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
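 /* Editorial example (hedged): since the precision letter sets no bits,
    "fixsz r0, f1" and "fixz r0, f1" share the table value e100170 above
    and assemble to the same instruction.  */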
16299
16300 /* Instructions that were new with the real FPA, call them V2. */
16301 #undef ARM_VARIANT
16302 #define ARM_VARIANT &fpu_fpa_ext_v2
16303 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16304 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16305 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16306 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16307 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16308 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16309
16310 #undef ARM_VARIANT
16311 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
16312 /* Moves and type conversions. */
16313 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
16314 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
16315 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
16316 cCE(fmstat, ef1fa10, 0, (), noargs),
16317 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
16318 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
16319 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
16320 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
16321 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
16322 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
16323 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
16324 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
16325
16326 /* Memory operations. */
16327 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
16328 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
16329 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16330 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16331 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16332 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16333 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16334 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16335 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16336 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16337 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16338 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16339 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16340 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16341 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16342 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16343 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16344 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16345
16346 /* Monadic operations. */
16347 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
16348 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
16349 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
16350
16351 /* Dyadic operations. */
16352 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16353 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16354 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16355 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16356 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16357 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16358 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16359 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16360 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16361
16362 /* Comparisons. */
16363 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
16364 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
16365 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
16366 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
16367
16368 #undef ARM_VARIANT
16369 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
16370 /* Moves and type conversions. */
16371 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16372 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
16373 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16374 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
16375 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
16376 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
16377 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
16378 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
16379 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
16380 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
16381 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16382 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
16383 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16384
16385 /* Memory operations. */
16386 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
16387 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
16388 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16389 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16390 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16391 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16392 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16393 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16394 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16395 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16396
16397 /* Monadic operations. */
16398 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16399 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16400 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16401
16402 /* Dyadic operations. */
16403 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16404 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16405 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16406 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16407 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16408 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16409 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16410 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16411 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16412
16413 /* Comparisons. */
16414 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16415 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
16416 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16417 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
16418
16419 #undef ARM_VARIANT
16420 #define ARM_VARIANT &fpu_vfp_ext_v2
16421 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
16422 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
16423 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
16424 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
16425
16426 /* Instructions which may belong to either the Neon or VFP instruction sets.
16427 Individual encoder functions perform additional architecture checks. */
16428 #undef ARM_VARIANT
16429 #define ARM_VARIANT &fpu_vfp_ext_v1xd
16430 #undef THUMB_VARIANT
16431 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
16432 /* These mnemonics are unique to VFP. */
16433 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
16434 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
16435 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16436 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16437 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16438 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
16439 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
16440 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
16441 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
16442 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
16443
16444 /* Mnemonics shared by Neon and VFP. */
16445 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
16446 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16447 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16448
16449 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
16450 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
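 /* Editorial sketch (hedged): a shared mnemonic picks its instruction
    set from the operands, e.g.

        vadd.f32  s0, s1, s2    @ single-precision operands -> VFP
        vadd.i32  q0, q1, q2    @ quadword operands         -> Neon

    with the encoder (here neon_addsub_if_i) making the final
    architecture check.  */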
16451
16452 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16453 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16454
16455 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16456 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16457 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16458 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16459 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16460 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16461 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16462 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16463
16464 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
16465 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
16466 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),
16467
16468
16469 /* NOTE: All VMOV encoding is special-cased! */
16470 NCE(vmov, 0, 1, (VMOV), neon_mov),
16471 NCE(vmovq, 0, 1, (VMOV), neon_mov),
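 /* Editorial note (hedged): vmov spans several unrelated forms, e.g.

        vmov     r0, s0         @ core register from VFP single
        vmov     d0, r0, r1     @ two core registers to a double
        vmov.i32 q0, #0         @ Neon immediate

    which is why the parser takes a single VMOV operand and leaves the
    encoding entirely to neon_mov.  */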
16472
16473 #undef THUMB_VARIANT
16474 #define THUMB_VARIANT &fpu_neon_ext_v1
16475 #undef ARM_VARIANT
16476 #define ARM_VARIANT &fpu_neon_ext_v1
16477 /* Data processing with three registers of the same length. */
16478 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
16479 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
16480 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
16481 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16482 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16483 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16484 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16485 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16486 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16487 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
16488 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16489 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16490 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16491 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16492 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16493 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16494 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16495 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16496 /* If not immediate, fall back to neon_dyadic_i64_su.
16497 shl_imm should accept I8 I16 I32 I64,
16498 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
16499 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
16500 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
16501 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
16502 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
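 /* Editorial sketch (hedged): "vshl.i32 d0, d1, #3" takes the immediate
    path, while "vshl.s32 d0, d1, d2" has no immediate and falls back to
    the register form described above.  */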
16503 /* Logic ops, types optional & ignored. */
16504 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
16505 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
16506 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
16507 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
16508 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
16509 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
16510 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
16511 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
16512 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
16513 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
16514 /* Bitfield ops, untyped. */
16515 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16516 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16517 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16518 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16519 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16520 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16521 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
16522 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16523 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16524 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16525 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16526 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16527 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16528 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
16529 back to neon_dyadic_if_su. */
16530 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16531 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16532 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16533 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16534 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16535 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
16536 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16537 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
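 /* Editorial note (hedged): e.g. "vcge.s32 d0, d1, #0" uses the
    immediate-zero comparison, while "vcge.f32 d0, d1, d2" has no
    immediate and falls back to neon_dyadic_if_su as noted above.  */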
16538 /* Comparison. Type I8 I16 I32 F32. */
16539 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
16540 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
16541 /* As above, D registers only. */
16542 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16543 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16544 /* Int and float variants, signedness unimportant. */
16545 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16546 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16547 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
16548 /* Add/sub take types I8 I16 I32 I64 F32. */
16549 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16550 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16551 /* vtst takes sizes 8, 16, 32. */
16552 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
16553 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
16554 /* VMUL takes I8 I16 I32 F32 P8. */
16555 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
16556 /* VQD{R}MULH takes S16 S32. */
16557 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16558 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16559 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16560 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16561 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16562 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16563 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16564 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16565 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16566 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16567 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16568 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16569 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16570 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16571 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16572 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16573
16574 /* Two address, int/float. Types S8 S16 S32 F32. */
16575 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
16576 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
16577
16578 /* Data processing with two registers and a shift amount. */
16579 /* Right shifts, and variants with rounding.
16580 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
16581 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16582 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16583 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16584 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16585 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16586 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16587 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16588 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16589 /* Shift and insert. Sizes accepted 8 16 32 64. */
16590 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
16591 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
16592 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
16593 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
16594 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
16595 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
16596 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
16597 /* Right shift immediate, saturating & narrowing, with rounding variants.
16598 Types accepted S16 S32 S64 U16 U32 U64. */
16599 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16600 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16601 /* As above, unsigned. Types accepted S16 S32 S64. */
16602 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16603 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16604 /* Right shift narrowing. Types accepted I16 I32 I64. */
16605 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16606 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16607 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
16608 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
16609 /* CVT with optional immediate for fixed-point variant. */
16610 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
16611
16612 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
16613 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
16614
16615 /* Data processing, three registers of different lengths. */
16616 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
16617 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
16618 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
16619 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
16620 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
16621 /* If not scalar, fall back to neon_dyadic_long.
16622 Vector types as above, scalar types S16 S32 U16 U32. */
16623 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16624 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16625 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
16626 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16627 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16628 /* Dyadic, narrowing insns. Types I16 I32 I64. */
16629 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16630 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16631 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16632 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16633 /* Saturating doubling multiplies. Types S16 S32. */
16634 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16635 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16636 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16637 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
16638 S16 S32 U16 U32. */
16639 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
16640
16641 /* Extract. Size 8. */
16642 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
16643 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
16644
16645 /* Two registers, miscellaneous. */
16646 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
16647 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
16648 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
16649 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
16650 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
16651 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
16652 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
16653 /* Vector replicate. Sizes 8 16 32. */
16654 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
16655 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
16656 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
16657 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
16658 /* VMOVN. Types I16 I32 I64. */
16659 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
16660 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
16661 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
16662 /* VQMOVUN. Types S16 S32 S64. */
16663 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
16664 /* VZIP / VUZP. Sizes 8 16 32. */
16665 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
16666 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
16667 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
16668 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
16669 /* VQABS / VQNEG. Types S8 S16 S32. */
16670 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16671 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
16672 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16673 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
16674 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
16675 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
16676 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
16677 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
16678 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
16679 /* Reciprocal estimates. Types U32 F32. */
16680 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16681 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16682 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16683 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16684 /* VCLS. Types S8 S16 S32. */
16685 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16686 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16687 /* VCLZ. Types I8 I16 I32. */
16688 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16689 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16690 /* VCNT. Size 8. */
16691 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16692 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16693 /* Two address, untyped. */
16694 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16695 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16696 /* VTRN. Sizes 8 16 32. */
16697 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16698 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16699
16700 /* Table lookup. Size 8. */
16701 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16702 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16703
16704 #undef THUMB_VARIANT
16705 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16706 #undef ARM_VARIANT
16707 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16708 /* Neon element/structure load/store. */
16709 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16710 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16711 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16712 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16713 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16714 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16715 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16716 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16717
16718 #undef THUMB_VARIANT
16719 #define THUMB_VARIANT &fpu_vfp_ext_v3
16720 #undef ARM_VARIANT
16721 #define ARM_VARIANT &fpu_vfp_ext_v3
16722 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16723 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16724 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16725 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16726 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16727 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16728 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16729 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16730 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16731 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16732 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16733 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16734 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16735 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16736 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16737 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16738 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16739 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16740
16741 #undef THUMB_VARIANT
16742 #undef ARM_VARIANT
16743 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16744 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16745 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16746 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16747 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16748 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16749 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16750 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16751 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16752
16753 #undef ARM_VARIANT
16754 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16755 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16756 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16757 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16758 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16759 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16760 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16761 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16762 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16763 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16764 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16765 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16766 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16767 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16768 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16769 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16770 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16771 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16772 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16773 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16774 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16775 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16776 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16777 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16778 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16779 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16780 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16781 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16782 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16783 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16784 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16785 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16786 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16787 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16788 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16789 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16790 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16791 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16792 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16793 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16794 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16795 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16796 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16797 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16798 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16799 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16800 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16801 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16802 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16803 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16804 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16805 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16806 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16807 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16808 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16809 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16810 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16811 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16812 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16813 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16814 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16815 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16816 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16817 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16818 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16819 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16820 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16821 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16822 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16823 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16824 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16825 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16826 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16827 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16828 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16829 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16830 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16831 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16832 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16833 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16834 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16835 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16836 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16837 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16838 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16839 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16840 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16841 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16842 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16843 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16844 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16845 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16846 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16847 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16848 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16849 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16850 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16851 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16852 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16853 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16854 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16855 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16856 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16857 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16858 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16859 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16860 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16861 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16862 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16863 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16864 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16865 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16866 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16867 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16868 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16869 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16870 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16871 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16872 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16873 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16874 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16875 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16876 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16877 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16878 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16879 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16880 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16881 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16882 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16883 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16884 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16885 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16886 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16887 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16888 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16889 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16890 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16891 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16892 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16893 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16894 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16895 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16896 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16897 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16898 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16899 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16900 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16901 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16902 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16903 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16904 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16905 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16906 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16907 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16908 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16909 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16910 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16911 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16912 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16913 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16914 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16915 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16916 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16917
16918 #undef ARM_VARIANT
16919 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16920 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16921 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16922 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16923 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16924 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16925 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16926 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16927 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16928 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16929 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16930 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16931 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16932 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16933 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16934 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16935 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16936 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16937 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16938 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16939 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16940 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16941 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16942 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16943 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16944 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16945 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16946 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16947 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16948 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16949 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16950 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16951 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16952 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16953 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16954 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16955 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16956 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16957 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16958 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16959 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16960 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16961 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16962 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16963 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16964 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16965 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16966 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16967 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16968 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16969 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16970 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16971 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16972 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16973 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16974 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16975 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16976 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16977
16978 #undef ARM_VARIANT
16979 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16980 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16981 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16982 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16983 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16984 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16985 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16986 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16987 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16988 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16989 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16990 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16991 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16992 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16993 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16994 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16995 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16996 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16997 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16998 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16999 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
17000 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
17001 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
17002 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
17003 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
17004 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
17005 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
17006 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
17007 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
17008 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
17009 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
17010 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
17011 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
17012 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
17013 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
17014 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
17015 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
17016 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
17017 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
17018 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
17019 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
17020 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
17021 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
17022 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
17023 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
17024 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
17025 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
17026 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
17027 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
17028 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
17029 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
17030 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
17031 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
17032 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
17033 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
17034 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
17035 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
17036 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
17037 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
17038 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
17039 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
17040 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
17041 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
17042 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
17043 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
17044 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17045 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17046 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17047 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17048 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17049 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17050 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17051 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17052 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17053 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17054 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17055 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17056 };
17057 #undef ARM_VARIANT
17058 #undef THUMB_VARIANT
17059 #undef TCE
17060 #undef TCM
17061 #undef TUE
17062 #undef TUF
17063 #undef TCC
17064 #undef cCE
17065 #undef cCL
17066 #undef C3E
17067 #undef CE
17068 #undef CM
17069 #undef UE
17070 #undef UF
17071 #undef UT
17072 #undef NUF
17073 #undef nUF
17074 #undef NCE
17075 #undef nCE
17076 #undef OPS0
17077 #undef OPS1
17078 #undef OPS2
17079 #undef OPS3
17080 #undef OPS4
17081 #undef OPS5
17082 #undef OPS6
17083 #undef do_0
17084 \f
17085 /* MD interface: bits in the object file. */
17086
17087 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
17088    for use in the a.out file, and store them in the array pointed to by buf.
17089    This knows about the endianness of the target machine and does
17090    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
17091    2 (short) and 4 (long).  Floating-point numbers are put out as a series
17092    of LITTLENUMS (shorts, here at least).  */
17093
17094 void
17095 md_number_to_chars (char * buf, valueT val, int n)
17096 {
17097 if (target_big_endian)
17098 number_to_chars_bigendian (buf, val, n);
17099 else
17100 number_to_chars_littleendian (buf, val, n);
17101 }
17102
17103 static valueT
17104 md_chars_to_number (char * buf, int n)
17105 {
17106 valueT result = 0;
17107 unsigned char * where = (unsigned char *) buf;
17108
17109 if (target_big_endian)
17110 {
17111 while (n--)
17112 {
17113 result <<= 8;
17114 result |= (*where++ & 255);
17115 }
17116 }
17117 else
17118 {
17119 while (n--)
17120 {
17121 result <<= 8;
17122 result |= (where[n] & 255);
17123 }
17124 }
17125
17126 return result;
17127 }
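/* For illustration: with BUF holding the bytes { 0x12, 0x34 } and N == 2,
   the loops above return 0x1234 on a big-endian target and 0x3412 on a
   little-endian one, mirroring what md_number_to_chars wrote out.  */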
17128
17129 /* MD interface: Sections. */
17130
17131 /* Estimate the size of a frag before relaxing. Assume everything fits in
17132 2 bytes. */
17133
17134 int
17135 md_estimate_size_before_relax (fragS * fragp,
17136 segT segtype ATTRIBUTE_UNUSED)
17137 {
17138 fragp->fr_var = 2;
17139 return 2;
17140 }
17141
17142 /* Convert a machine dependent frag. */
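/* In outline: if relaxation settled on fr_var == 4, the 16-bit Thumb
   placeholder at the end of the frag is rewritten into its 32-bit Thumb-2
   form (register fields are copied across from the old opcode) and a
   32-bit relocation type is chosen; if fr_var == 2 the narrow opcode is
   kept and a 16-bit relocation is selected instead.  Either way a fixup
   is emitted to fill in the immediate or branch offset later.  */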
17143
17144 void
17145 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
17146 {
17147 unsigned long insn;
17148 unsigned long old_op;
17149 char *buf;
17150 expressionS exp;
17151 fixS *fixp;
17152 int reloc_type;
17153 int pc_rel;
17154 int opcode;
17155
17156 buf = fragp->fr_literal + fragp->fr_fix;
17157
17158 old_op = bfd_get_16(abfd, buf);
17159 if (fragp->fr_symbol)
17160 {
17161 exp.X_op = O_symbol;
17162 exp.X_add_symbol = fragp->fr_symbol;
17163 }
17164 else
17165 {
17166 exp.X_op = O_constant;
17167 }
17168 exp.X_add_number = fragp->fr_offset;
17169 opcode = fragp->fr_subtype;
17170 switch (opcode)
17171 {
17172 case T_MNEM_ldr_pc:
17173 case T_MNEM_ldr_pc2:
17174 case T_MNEM_ldr_sp:
17175 case T_MNEM_str_sp:
17176 case T_MNEM_ldr:
17177 case T_MNEM_ldrb:
17178 case T_MNEM_ldrh:
17179 case T_MNEM_str:
17180 case T_MNEM_strb:
17181 case T_MNEM_strh:
17182 if (fragp->fr_var == 4)
17183 {
17184 insn = THUMB_OP32 (opcode);
17185 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
17186 {
17187 insn |= (old_op & 0x700) << 4;
17188 }
17189 else
17190 {
17191 insn |= (old_op & 7) << 12;
17192 insn |= (old_op & 0x38) << 13;
17193 }
17194 insn |= 0x00000c00;
17195 put_thumb32_insn (buf, insn);
17196 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
17197 }
17198 else
17199 {
17200 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
17201 }
17202 pc_rel = (opcode == T_MNEM_ldr_pc2);
17203 break;
17204 case T_MNEM_adr:
17205 if (fragp->fr_var == 4)
17206 {
17207 insn = THUMB_OP32 (opcode);
17208 insn |= (old_op & 0xf0) << 4;
17209 put_thumb32_insn (buf, insn);
17210 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
17211 }
17212 else
17213 {
17214 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17215 exp.X_add_number -= 4;
17216 }
17217 pc_rel = 1;
17218 break;
17219 case T_MNEM_mov:
17220 case T_MNEM_movs:
17221 case T_MNEM_cmp:
17222 case T_MNEM_cmn:
17223 if (fragp->fr_var == 4)
17224 {
17225 int r0off = (opcode == T_MNEM_mov
17226 || opcode == T_MNEM_movs) ? 0 : 8;
17227 insn = THUMB_OP32 (opcode);
17228 insn = (insn & 0xe1ffffff) | 0x10000000;
17229 insn |= (old_op & 0x700) << r0off;
17230 put_thumb32_insn (buf, insn);
17231 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
17232 }
17233 else
17234 {
17235 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
17236 }
17237 pc_rel = 0;
17238 break;
17239 case T_MNEM_b:
17240 if (fragp->fr_var == 4)
17241 {
17242 insn = THUMB_OP32(opcode);
17243 put_thumb32_insn (buf, insn);
17244 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
17245 }
17246 else
17247 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
17248 pc_rel = 1;
17249 break;
17250 case T_MNEM_bcond:
17251 if (fragp->fr_var == 4)
17252 {
17253 insn = THUMB_OP32(opcode);
17254 insn |= (old_op & 0xf00) << 14;
17255 put_thumb32_insn (buf, insn);
17256 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
17257 }
17258 else
17259 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
17260 pc_rel = 1;
17261 break;
17262 case T_MNEM_add_sp:
17263 case T_MNEM_add_pc:
17264 case T_MNEM_inc_sp:
17265 case T_MNEM_dec_sp:
17266 if (fragp->fr_var == 4)
17267 {
17268 /* ??? Choose between add and addw. */
17269 insn = THUMB_OP32 (opcode);
17270 insn |= (old_op & 0xf0) << 4;
17271 put_thumb32_insn (buf, insn);
17272 if (opcode == T_MNEM_add_pc)
17273 reloc_type = BFD_RELOC_ARM_T32_IMM12;
17274 else
17275 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
17276 }
17277 else
17278 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17279 pc_rel = 0;
17280 break;
17281
17282 case T_MNEM_addi:
17283 case T_MNEM_addis:
17284 case T_MNEM_subi:
17285 case T_MNEM_subis:
17286 if (fragp->fr_var == 4)
17287 {
17288 insn = THUMB_OP32 (opcode);
17289 insn |= (old_op & 0xf0) << 4;
17290 insn |= (old_op & 0xf) << 16;
17291 put_thumb32_insn (buf, insn);
17292 if (insn & (1 << 20))
17293 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
17294 else
17295 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
17296 }
17297 else
17298 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17299 pc_rel = 0;
17300 break;
17301 default:
17302 abort ();
17303 }
17304 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
17305 reloc_type);
17306 fixp->fx_file = fragp->fr_file;
17307 fixp->fx_line = fragp->fr_line;
17308 fragp->fr_fix += fragp->fr_var;
17309 }
17310
17311 /* Return the size of a relaxable immediate operand instruction.
17312 SHIFT and SIZE specify the form of the allowable immediate. */
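/* Worked example: arm_relax_frag below passes SIZE == 5, SHIFT == 2 for
   Thumb LDR/STR, giving LOW == 3 and MASK == 0x7c, so only offsets that
   are multiples of 4 in the range 0..124 keep the 2-byte encoding;
   anything else (or any symbolic offset) forces the 4-byte form.  */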
17313 static int
17314 relax_immediate (fragS *fragp, int size, int shift)
17315 {
17316 offsetT offset;
17317 offsetT mask;
17318 offsetT low;
17319
17320 /* ??? Should be able to do better than this. */
17321 if (fragp->fr_symbol)
17322 return 4;
17323
17324 low = (1 << shift) - 1;
17325 mask = (1 << (shift + size)) - (1 << shift);
17326 offset = fragp->fr_offset;
17327 /* Force misaligned offsets to 32-bit variant. */
17328 if (offset & low)
17329 return 4;
17330 if (offset & ~mask)
17331 return 4;
17332 return 2;
17333 }
17334
17335 /* Get the address of a symbol during relaxation. */
17336 static addressT
17337 relaxed_symbol_addr (fragS *fragp, long stretch)
17338 {
17339 fragS *sym_frag;
17340 addressT addr;
17341 symbolS *sym;
17342
17343 sym = fragp->fr_symbol;
17344 sym_frag = symbol_get_frag (sym);
17345 know (S_GET_SEGMENT (sym) != absolute_section
17346 || sym_frag == &zero_address_frag);
17347 addr = S_GET_VALUE (sym) + fragp->fr_offset;
17348
17349   /* If the symbol's frag has yet to be reached on this pass, assume it
17350      will move by STRETCH just as we did.  If this is not so, it will
17351      be because some frag in between has grown, and that will force
17352      another pass.  */
17353
17354 if (stretch != 0
17355 && sym_frag->relax_marker != fragp->relax_marker)
17356 {
17357 fragS *f;
17358
17359 	  /* Adjust stretch for any alignment frag.  Note that if we have
17360 	     been expanding the earlier code, the symbol may be
17361 defined in what appears to be an earlier frag. FIXME:
17362 This doesn't handle the fr_subtype field, which specifies
17363 a maximum number of bytes to skip when doing an
17364 alignment. */
17365 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
17366 {
17367 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
17368 {
17369 if (stretch < 0)
17370 stretch = - ((- stretch)
17371 & ~ ((1 << (int) f->fr_offset) - 1));
17372 else
17373 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
17374 if (stretch == 0)
17375 break;
17376 }
17377 }
17378 if (f != NULL)
17379 addr += stretch;
17380 }
17381
17382 return addr;
17383 }
17384
17385 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
17386 load. */
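/* For example: with the insn at fr_address + fr_fix == 0x1002 the base
   below becomes (0x1002 + 4) & ~3 == 0x1004, so a word-aligned target
   between 0x1004 and 0x1400 (base + 1020) can use the 2-byte form,
   while unaligned or more distant targets need 4 bytes.  */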
17387 static int
17388 relax_adr (fragS *fragp, asection *sec, long stretch)
17389 {
17390 addressT addr;
17391 offsetT val;
17392
17393 /* Assume worst case for symbols not known to be in the same section. */
17394 if (!S_IS_DEFINED (fragp->fr_symbol)
17395 || sec != S_GET_SEGMENT (fragp->fr_symbol))
17396 return 4;
17397
17398 val = relaxed_symbol_addr (fragp, stretch);
17399 addr = fragp->fr_address + fragp->fr_fix;
17400 addr = (addr + 4) & ~3;
17401 /* Force misaligned targets to 32-bit variant. */
17402 if (val & 3)
17403 return 4;
17404 val -= addr;
17405 if (val < 0 || val > 1020)
17406 return 4;
17407 return 2;
17408 }
17409
17410 /* Return the size of a relaxable add/sub immediate instruction. */
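/* The 16-bit Thumb add/sub immediate has an 8-bit immediate form only
   when the destination and source registers are the same (add Rdn,
   #imm8); with distinct registers only a 3-bit immediate fits.  The
   nibble comparison below distinguishes the two cases on the
   placeholder opcode.  */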
17411 static int
17412 relax_addsub (fragS *fragp, asection *sec)
17413 {
17414 char *buf;
17415 int op;
17416
17417 buf = fragp->fr_literal + fragp->fr_fix;
17418 op = bfd_get_16(sec->owner, buf);
17419 if ((op & 0xf) == ((op >> 4) & 0xf))
17420 return relax_immediate (fragp, 8, 0);
17421 else
17422 return relax_immediate (fragp, 3, 0);
17423 }
17424
17425
17426 /* Return the size of a relaxable branch instruction. BITS is the
17427 size of the offset field in the narrow instruction. */
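/* For example: an unconditional Thumb "b" is relaxed with BITS == 11
   (see arm_relax_frag below), so the 2-byte form is kept only while the
   displacement from the insn address + 4 stays within -2048..+2047
   bytes; conditional branches use BITS == 8, i.e. -256..+255 bytes.  */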
17428
17429 static int
17430 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
17431 {
17432 addressT addr;
17433 offsetT val;
17434 offsetT limit;
17435
17436 /* Assume worst case for symbols not known to be in the same section. */
17437 if (!S_IS_DEFINED (fragp->fr_symbol)
17438 || sec != S_GET_SEGMENT (fragp->fr_symbol))
17439 return 4;
17440
17441 val = relaxed_symbol_addr (fragp, stretch);
17442 addr = fragp->fr_address + fragp->fr_fix + 4;
17443 val -= addr;
17444
17445   /* The offset is encoded as a signed value divided by 2, so the narrow form reaches +/- (1 << BITS) bytes.  */
17446 limit = 1 << bits;
17447 if (val >= limit || val < -limit)
17448 return 4;
17449 return 2;
17450 }
17451
17452
17453 /* Relax a machine dependent frag. This returns the amount by which
17454 the current size of the frag should change. */
17455
17456 int
17457 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
17458 {
17459 int oldsize;
17460 int newsize;
17461
17462 oldsize = fragp->fr_var;
17463 switch (fragp->fr_subtype)
17464 {
17465 case T_MNEM_ldr_pc2:
17466 newsize = relax_adr (fragp, sec, stretch);
17467 break;
17468 case T_MNEM_ldr_pc:
17469 case T_MNEM_ldr_sp:
17470 case T_MNEM_str_sp:
17471 newsize = relax_immediate (fragp, 8, 2);
17472 break;
17473 case T_MNEM_ldr:
17474 case T_MNEM_str:
17475 newsize = relax_immediate (fragp, 5, 2);
17476 break;
17477 case T_MNEM_ldrh:
17478 case T_MNEM_strh:
17479 newsize = relax_immediate (fragp, 5, 1);
17480 break;
17481 case T_MNEM_ldrb:
17482 case T_MNEM_strb:
17483 newsize = relax_immediate (fragp, 5, 0);
17484 break;
17485 case T_MNEM_adr:
17486 newsize = relax_adr (fragp, sec, stretch);
17487 break;
17488 case T_MNEM_mov:
17489 case T_MNEM_movs:
17490 case T_MNEM_cmp:
17491 case T_MNEM_cmn:
17492 newsize = relax_immediate (fragp, 8, 0);
17493 break;
17494 case T_MNEM_b:
17495 newsize = relax_branch (fragp, sec, 11, stretch);
17496 break;
17497 case T_MNEM_bcond:
17498 newsize = relax_branch (fragp, sec, 8, stretch);
17499 break;
17500 case T_MNEM_add_sp:
17501 case T_MNEM_add_pc:
17502 newsize = relax_immediate (fragp, 8, 2);
17503 break;
17504 case T_MNEM_inc_sp:
17505 case T_MNEM_dec_sp:
17506 newsize = relax_immediate (fragp, 7, 2);
17507 break;
17508 case T_MNEM_addi:
17509 case T_MNEM_addis:
17510 case T_MNEM_subi:
17511 case T_MNEM_subis:
17512 newsize = relax_addsub (fragp, sec);
17513 break;
17514 default:
17515 abort ();
17516 }
17517
17518 fragp->fr_var = newsize;
17519 /* Freeze wide instructions that are at or before the same location as
17520 in the previous pass. This avoids infinite loops.
17521 Don't freeze them unconditionally because targets may be artificially
17522 misaligned by the expansion of preceding frags. */
17523 if (stretch <= 0 && newsize > 2)
17524 {
17525 md_convert_frag (sec->owner, sec, fragp);
17526 frag_wane (fragp);
17527 }
17528
17529 return newsize - oldsize;
17530 }
17531
17532 /* Round up a section size to the appropriate boundary. */
17533
17534 valueT
17535 md_section_align (segT segment ATTRIBUTE_UNUSED,
17536 valueT size)
17537 {
17538 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
17539 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
17540 {
17541 /* For a.out, force the section size to be aligned. If we don't do
17542 this, BFD will align it for us, but it will not write out the
17543 final bytes of the section. This may be a bug in BFD, but it is
17544 easier to fix it here since that is how the other a.out targets
17545 work. */
17546 int align;
17547
17548 align = bfd_get_section_alignment (stdoutput, segment);
17549 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
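      /* e.g. with ALIGN == 2 a SIZE of 10 is rounded up to (10 + 3) & ~3 == 12.  */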
17550 }
17551 #endif
17552
17553 return size;
17554 }
17555
17556 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
17557 of an rs_align_code fragment. */
17558
17559 void
17560 arm_handle_align (fragS * fragP)
17561 {
17562 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
17563 static char const thumb_noop[2] = { 0xc0, 0x46 };
17564 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
17565 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
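  /* These are the traditional ARM and Thumb no-op encodings in both
     byte orders: 0xe1a00000 is "mov r0, r0" and 0x46c0 is the Thumb
     "mov r8, r8".  */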
17566
17567 int bytes, fix, noop_size;
17568 char * p;
17569 const char * noop;
17570
17571 if (fragP->fr_type != rs_align_code)
17572 return;
17573
17574 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
17575 p = fragP->fr_literal + fragP->fr_fix;
17576 fix = 0;
17577
17578 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
17579 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
17580
17581 assert ((fragP->tc_frag_data & MODE_RECORDED) != 0);
17582
17583 if (fragP->tc_frag_data & (~ MODE_RECORDED))
17584 {
17585 if (target_big_endian)
17586 noop = thumb_bigend_noop;
17587 else
17588 noop = thumb_noop;
17589 noop_size = sizeof (thumb_noop);
17590 }
17591 else
17592 {
17593 if (target_big_endian)
17594 noop = arm_bigend_noop;
17595 else
17596 noop = arm_noop;
17597 noop_size = sizeof (arm_noop);
17598 }
17599
17600 if (bytes & (noop_size - 1))
17601 {
17602 fix = bytes & (noop_size - 1);
17603 memset (p, 0, fix);
17604 p += fix;
17605 bytes -= fix;
17606 }
17607
17608 while (bytes >= noop_size)
17609 {
17610 memcpy (p, noop, noop_size);
17611 p += noop_size;
17612 bytes -= noop_size;
17613 fix += noop_size;
17614 }
17615
17616 fragP->fr_fix += fix;
17617 fragP->fr_var = noop_size;
17618 }
17619
17620 /* Called from md_do_align. Used to create an alignment
17621 frag in a code section. */
17622
17623 void
17624 arm_frag_align_code (int n, int max)
17625 {
17626 char * p;
17627
17628 /* We assume that there will never be a requirement
17629 to support alignments greater than 32 bytes. */
17630 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
17631 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
17632
17633 p = frag_var (rs_align_code,
17634 MAX_MEM_FOR_RS_ALIGN_CODE,
17635 1,
17636 (relax_substateT) max,
17637 (symbolS *) NULL,
17638 (offsetT) n,
17639 (char *) NULL);
17640 *p = 0;
17641 }
17642
17643 /* Perform target specific initialisation of a frag.
17644 Note - despite the name this initialisation is not done when the frag
17645 is created, but only when its type is assigned. A frag can be created
17646 and used a long time before its type is set, so beware of assuming that
17647    this initialisation is performed first.  */
17648
17649 void
17650 arm_init_frag (fragS * fragP)
17651 {
17652 /* If the current ARM vs THUMB mode has not already
17653 been recorded into this frag then do so now. */
17654 if ((fragP->tc_frag_data & MODE_RECORDED) == 0)
17655 fragP->tc_frag_data = thumb_mode | MODE_RECORDED;
17656 }
17657
17658 #ifdef OBJ_ELF
17659 /* When we change sections we need to issue a new mapping symbol. */
17660
17661 void
17662 arm_elf_change_section (void)
17663 {
17664 flagword flags;
17665 segment_info_type *seginfo;
17666
17667 /* Link an unlinked unwind index table section to the .text section. */
17668 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
17669 && elf_linked_to_section (now_seg) == NULL)
17670 elf_linked_to_section (now_seg) = text_section;
17671
17672 if (!SEG_NORMAL (now_seg))
17673 return;
17674
17675 flags = bfd_get_section_flags (stdoutput, now_seg);
17676
17677 /* We can ignore sections that only contain debug info. */
17678 if ((flags & SEC_ALLOC) == 0)
17679 return;
17680
17681 seginfo = seg_info (now_seg);
17682 mapstate = seginfo->tc_segment_info_data.mapstate;
17683 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
17684 }
17685
17686 int
17687 arm_elf_section_type (const char * str, size_t len)
17688 {
17689 if (len == 5 && strncmp (str, "exidx", 5) == 0)
17690 return SHT_ARM_EXIDX;
17691
17692 return -1;
17693 }
17694 \f
17695 /* Code to deal with unwinding tables. */
17696
17697 static void add_unwind_adjustsp (offsetT);
17698
17699 /* Generate any deferred unwind frame offset. */
17700
17701 static void
17702 flush_pending_unwind (void)
17703 {
17704 offsetT offset;
17705
17706 offset = unwind.pending_offset;
17707 unwind.pending_offset = 0;
17708 if (offset != 0)
17709 add_unwind_adjustsp (offset);
17710 }
17711
17712 /* Add an opcode to this list for this function. Two-byte opcodes should
17713 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17714 order. */
17715
17716 static void
17717 add_unwind_opcode (valueT op, int length)
17718 {
17719 /* Add any deferred stack adjustment. */
17720 if (unwind.pending_offset)
17721 flush_pending_unwind ();
17722
17723 unwind.sp_restored = 0;
17724
17725 if (unwind.opcode_count + length > unwind.opcode_alloc)
17726 {
17727 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17728 if (unwind.opcodes)
17729 unwind.opcodes = xrealloc (unwind.opcodes,
17730 unwind.opcode_alloc);
17731 else
17732 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17733 }
17734 while (length > 0)
17735 {
17736 length--;
17737 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17738 op >>= 8;
17739 unwind.opcode_count++;
17740 }
17741 }
17742
17743 /* Add unwind opcodes to adjust the stack pointer. */
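/* Worked example (assuming the ARM EHABI reading of opcode 0xb2 as
   "vsp = vsp + 0x204 + (uleb128 << 2)"): an adjustment of 0x300 bytes
   takes the long form below with o == (0x300 - 0x204) >> 2 == 0x3f, and
   because the opcode list is built in reverse the bytes finally emitted
   are 0xb2 0x3f.  An adjustment of 8 bytes takes the short form, the
   single opcode (8 - 4) >> 2 == 0x01, i.e. "vsp += 8".  */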
17744
17745 static void
17746 add_unwind_adjustsp (offsetT offset)
17747 {
17748 valueT op;
17749
17750 if (offset > 0x200)
17751 {
17752 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17753 char bytes[5];
17754 int n;
17755 valueT o;
17756
17757 /* Long form: 0xb2, uleb128. */
17758 /* This might not fit in a word so add the individual bytes,
17759 remembering the list is built in reverse order. */
17760 o = (valueT) ((offset - 0x204) >> 2);
17761 if (o == 0)
17762 add_unwind_opcode (0, 1);
17763
17764 /* Calculate the uleb128 encoding of the offset. */
17765 n = 0;
17766 while (o)
17767 {
17768 bytes[n] = o & 0x7f;
17769 o >>= 7;
17770 if (o)
17771 bytes[n] |= 0x80;
17772 n++;
17773 }
17774 /* Add the insn. */
17775 for (; n; n--)
17776 add_unwind_opcode (bytes[n - 1], 1);
17777 add_unwind_opcode (0xb2, 1);
17778 }
17779 else if (offset > 0x100)
17780 {
17781 /* Two short opcodes. */
17782 add_unwind_opcode (0x3f, 1);
17783 op = (offset - 0x104) >> 2;
17784 add_unwind_opcode (op, 1);
17785 }
17786 else if (offset > 0)
17787 {
17788 /* Short opcode. */
17789 op = (offset - 4) >> 2;
17790 add_unwind_opcode (op, 1);
17791 }
17792 else if (offset < 0)
17793 {
17794 offset = -offset;
17795 while (offset > 0x100)
17796 {
17797 add_unwind_opcode (0x7f, 1);
17798 offset -= 0x100;
17799 }
17800 op = ((offset - 4) >> 2) | 0x40;
17801 add_unwind_opcode (op, 1);
17802 }
17803 }
17804
17805 /* Finish the list of unwind opcodes for this function. */
17806 static void
17807 finish_unwind_opcodes (void)
17808 {
17809 valueT op;
17810
17811 if (unwind.fp_used)
17812 {
17813 /* Adjust sp as necessary. */
17814 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17815 flush_pending_unwind ();
17816
17817       /* Emit the opcode that restores sp from the frame pointer register.  */
17818 op = 0x90 | unwind.fp_reg;
17819 add_unwind_opcode (op, 1);
17820 }
17821 else
17822 flush_pending_unwind ();
17823 }
17824
17825
17826 /* Start an exception table entry. If idx is nonzero this is an index table
17827 entry. */
17828
17829 static void
17830 start_unwind_section (const segT text_seg, int idx)
17831 {
17832 const char * text_name;
17833 const char * prefix;
17834 const char * prefix_once;
17835 const char * group_name;
17836 size_t prefix_len;
17837 size_t text_len;
17838 char * sec_name;
17839 size_t sec_name_len;
17840 int type;
17841 int flags;
17842 int linkonce;
17843
17844 if (idx)
17845 {
17846 prefix = ELF_STRING_ARM_unwind;
17847 prefix_once = ELF_STRING_ARM_unwind_once;
17848 type = SHT_ARM_EXIDX;
17849 }
17850 else
17851 {
17852 prefix = ELF_STRING_ARM_unwind_info;
17853 prefix_once = ELF_STRING_ARM_unwind_info_once;
17854 type = SHT_PROGBITS;
17855 }
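  /* Assuming the usual definitions of these prefixes in elf/arm.h
     (".ARM.exidx" and ".ARM.extab" -- they are not visible in this
     file), a function in ".text.foo" gets sections named
     ".ARM.exidx.text.foo" and ".ARM.extab.text.foo", while plain
     ".text" maps to just ".ARM.exidx" / ".ARM.extab".  */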
17856
17857 text_name = segment_name (text_seg);
17858 if (streq (text_name, ".text"))
17859 text_name = "";
17860
17861 if (strncmp (text_name, ".gnu.linkonce.t.",
17862 strlen (".gnu.linkonce.t.")) == 0)
17863 {
17864 prefix = prefix_once;
17865 text_name += strlen (".gnu.linkonce.t.");
17866 }
17867
17868 prefix_len = strlen (prefix);
17869 text_len = strlen (text_name);
17870 sec_name_len = prefix_len + text_len;
17871 sec_name = xmalloc (sec_name_len + 1);
17872 memcpy (sec_name, prefix, prefix_len);
17873 memcpy (sec_name + prefix_len, text_name, text_len);
17874 sec_name[prefix_len + text_len] = '\0';
17875
17876 flags = SHF_ALLOC;
17877 linkonce = 0;
17878 group_name = 0;
17879
17880 /* Handle COMDAT group. */
17881 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17882 {
17883 group_name = elf_group_name (text_seg);
17884 if (group_name == NULL)
17885 {
17886 as_bad (_("Group section `%s' has no group signature"),
17887 segment_name (text_seg));
17888 ignore_rest_of_line ();
17889 return;
17890 }
17891 flags |= SHF_GROUP;
17892 linkonce = 1;
17893 }
17894
17895 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17896
17897 /* Set the section link for index tables. */
17898 if (idx)
17899 elf_linked_to_section (now_seg) = text_seg;
17900 }
17901
17902
17903 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17904 personality routine data. Returns zero, or the index table value for
17905    an inline entry.  */
17906
17907 static valueT
17908 create_unwind_entry (int have_data)
17909 {
17910 int size;
17911 addressT where;
17912 char *ptr;
17913 /* The current word of data. */
17914 valueT data;
17915 /* The number of bytes left in this word. */
17916 int n;
17917
17918 finish_unwind_opcodes ();
17919
17920 /* Remember the current text section. */
17921 unwind.saved_seg = now_seg;
17922 unwind.saved_subseg = now_subseg;
17923
17924 start_unwind_section (now_seg, 0);
17925
17926 if (unwind.personality_routine == NULL)
17927 {
17928 if (unwind.personality_index == -2)
17929 {
17930 if (have_data)
17931 as_bad (_("handlerdata in cantunwind frame"));
17932 return 1; /* EXIDX_CANTUNWIND. */
17933 }
17934
17935 /* Use a default personality routine if none is specified. */
17936 if (unwind.personality_index == -1)
17937 {
17938 if (unwind.opcode_count > 3)
17939 unwind.personality_index = 1;
17940 else
17941 unwind.personality_index = 0;
17942 }
17943
17944 /* Space for the personality routine entry. */
17945 if (unwind.personality_index == 0)
17946 {
17947 if (unwind.opcode_count > 3)
17948 as_bad (_("too many unwind opcodes for personality routine 0"));
17949
17950 if (!have_data)
17951 {
17952 /* All the data is inline in the index table. */
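	      /* For instance, a frame with no unwind opcodes (or only the
	         0xb0 "finish" opcode) packs into the single word
	         0x80b0b0b0 after the padding below -- the compact inline
	         form commonly seen in .ARM.exidx entries.  */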
17953 data = 0x80;
17954 n = 3;
17955 while (unwind.opcode_count > 0)
17956 {
17957 unwind.opcode_count--;
17958 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17959 n--;
17960 }
17961
17962 /* Pad with "finish" opcodes. */
17963 while (n--)
17964 data = (data << 8) | 0xb0;
17965
17966 return data;
17967 }
17968 size = 0;
17969 }
17970 else
17971 /* We get two opcodes "free" in the first word. */
17972 size = unwind.opcode_count - 2;
17973 }
17974 else
17975 /* An extra byte is required for the opcode count. */
17976 size = unwind.opcode_count + 1;
17977
17978 size = (size + 3) >> 2;
17979 if (size > 0xff)
17980 as_bad (_("too many unwind opcodes"));
17981
17982 frag_align (2, 0, 0);
17983 record_alignment (now_seg, 2);
17984 unwind.table_entry = expr_build_dot ();
17985
17986 /* Allocate the table entry. */
17987 ptr = frag_more ((size << 2) + 4);
17988 where = frag_now_fix () - ((size << 2) + 4);
17989
17990 switch (unwind.personality_index)
17991 {
17992 case -1:
17993 /* ??? Should this be a PLT generating relocation? */
17994 /* Custom personality routine. */
17995 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17996 BFD_RELOC_ARM_PREL31);
17997
17998 where += 4;
17999 ptr += 4;
18000
18001 /* Set the first byte to the number of additional words. */
18002 data = size - 1;
18003 n = 3;
18004 break;
18005
18006 /* ABI defined personality routines. */
18007 case 0:
18008       /* Three opcode bytes are packed into the first word.  */
18009 data = 0x80;
18010 n = 3;
18011 break;
18012
18013 case 1:
18014 case 2:
18015 /* The size and first two opcode bytes go in the first word. */
18016 data = ((0x80 + unwind.personality_index) << 8) | size;
18017 n = 2;
18018 break;
18019
18020 default:
18021 /* Should never happen. */
18022 abort ();
18023 }
18024
18025 /* Pack the opcodes into words (MSB first), reversing the list at the same
18026 time. */
18027 while (unwind.opcode_count > 0)
18028 {
18029 if (n == 0)
18030 {
18031 md_number_to_chars (ptr, data, 4);
18032 ptr += 4;
18033 n = 4;
18034 data = 0;
18035 }
18036 unwind.opcode_count--;
18037 n--;
18038 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
18039 }
18040
18041 /* Finish off the last word. */
18042 if (n < 4)
18043 {
18044 /* Pad with "finish" opcodes. */
18045 while (n--)
18046 data = (data << 8) | 0xb0;
18047
18048 md_number_to_chars (ptr, data, 4);
18049 }
18050
18051 if (!have_data)
18052 {
18053 /* Add an empty descriptor if there is no user-specified data. */
18054 ptr = frag_more (4);
18055 md_number_to_chars (ptr, 0, 4);
18056 }
18057
18058 return 0;
18059 }
18060
18061
18062 /* Initialize the DWARF-2 unwind information for this procedure. */
18063
18064 void
18065 tc_arm_frame_initial_instructions (void)
18066 {
18067 cfi_add_CFA_def_cfa (REG_SP, 0);
18068 }
18069 #endif /* OBJ_ELF */
18070
18071 /* Convert REGNAME to a DWARF-2 register number. */
18072
18073 int
18074 tc_arm_regname_to_dw2regnum (char *regname)
18075 {
18076 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
18077
18078 if (reg == FAIL)
18079 return -1;
18080
18081 return reg;
18082 }
18083
18084 #ifdef TE_PE
18085 void
18086 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
18087 {
18088 expressionS expr;
18089
18090 expr.X_op = O_secrel;
18091 expr.X_add_symbol = symbol;
18092 expr.X_add_number = 0;
18093 emit_expr (&expr, size);
18094 }
18095 #endif
18096
18097 /* MD interface: Symbol and relocation handling. */
18098
18099 /* Return the address within the segment that a PC-relative fixup is
18100 relative to. For ARM, PC-relative fixups applied to instructions
18101 are generally relative to the location of the fixup plus 8 bytes.
18102 Thumb branches are offset by 4, and Thumb loads relative to PC
18103 require special handling. */
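/* For example: on a non-WinCE target an ARM-state branch whose fixup
   sits at address 0x1000 and resolves locally returns 0x1008 from the
   switch below, a Thumb branch there returns 0x1004, and a Thumb
   PC-relative load (BFD_RELOC_ARM_THUMB_OFFSET) returns
   (0x1000 + 4) & ~3 == 0x1004.  */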
18104
18105 long
18106 md_pcrel_from_section (fixS * fixP, segT seg)
18107 {
18108 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
18109
18110 /* If this is pc-relative and we are going to emit a relocation
18111 then we just want to put out any pipeline compensation that the linker
18112 will need. Otherwise we want to use the calculated base.
18113 For WinCE we skip the bias for externals as well, since this
18114 is how the MS ARM-CE assembler behaves and we want to be compatible. */
18115 if (fixP->fx_pcrel
18116 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
18117 || (arm_force_relocation (fixP)
18118 #ifdef TE_WINCE
18119 && !S_IS_EXTERNAL (fixP->fx_addsy)
18120 #endif
18121 )))
18122 base = 0;
18123
18124 switch (fixP->fx_r_type)
18125 {
18126 /* PC relative addressing on the Thumb is slightly odd as the
18127 bottom two bits of the PC are forced to zero for the
18128 calculation. This happens *after* application of the
18129 pipeline offset. However, Thumb adrl already adjusts for
18130 this, so we need not do it again. */
18131 case BFD_RELOC_ARM_THUMB_ADD:
18132 return base & ~3;
18133
18134 case BFD_RELOC_ARM_THUMB_OFFSET:
18135 case BFD_RELOC_ARM_T32_OFFSET_IMM:
18136 case BFD_RELOC_ARM_T32_ADD_PC12:
18137 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18138 return (base + 4) & ~3;
18139
18140 /* Thumb branches are simply offset by +4. */
18141 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18142 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18143 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18144 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18145 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18146 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18147 case BFD_RELOC_THUMB_PCREL_BLX:
18148 return base + 4;
18149
18150 /* ARM mode branches are offset by +8. However, the Windows CE
18151 loader expects the relocation not to take this into account. */
18152 case BFD_RELOC_ARM_PCREL_BRANCH:
18153 case BFD_RELOC_ARM_PCREL_CALL:
18154 case BFD_RELOC_ARM_PCREL_JUMP:
18155 case BFD_RELOC_ARM_PCREL_BLX:
18156 case BFD_RELOC_ARM_PLT32:
18157 #ifdef TE_WINCE
18158 /* When handling fixups immediately, because we have already
18159 discovered the value of a symbol or the address of the frag involved,
18160 we must account for the +8 offset ourselves, as the OS loader will
18161 never see the reloc; see fixup_segment() in write.c.
18162 The S_IS_EXTERNAL test handles the case of global symbols: those need
18163 the calculated base, not just the pipeline compensation that the linker will apply. */
18164 if (fixP->fx_pcrel
18165 && fixP->fx_addsy != NULL
18166 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
18167 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
18168 return base + 8;
18169 return base;
18170 #else
18171 return base + 8;
18172 #endif
18173
18174 /* ARM mode loads relative to PC are also offset by +8. Unlike
18175 branches, the Windows CE loader *does* expect the relocation
18176 to take this into account. */
18177 case BFD_RELOC_ARM_OFFSET_IMM:
18178 case BFD_RELOC_ARM_OFFSET_IMM8:
18179 case BFD_RELOC_ARM_HWLITERAL:
18180 case BFD_RELOC_ARM_LITERAL:
18181 case BFD_RELOC_ARM_CP_OFF_IMM:
18182 return base + 8;
18183
18184
18185 /* Other PC-relative relocations are un-offset. */
18186 default:
18187 return base;
18188 }
18189 }
18190
18191 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
18192 Otherwise we have no need to default values of symbols. */
18193
18194 symbolS *
18195 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
18196 {
18197 #ifdef OBJ_ELF
18198 if (name[0] == '_' && name[1] == 'G'
18199 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
18200 {
18201 if (!GOT_symbol)
18202 {
18203 if (symbol_find (name))
18204 as_bad (_("GOT already in the symbol table"));
18205
18206 GOT_symbol = symbol_new (name, undefined_section,
18207 (valueT) 0, & zero_address_frag);
18208 }
18209
18210 return GOT_symbol;
18211 }
18212 #endif
18213
18214 return 0;
18215 }
18216
18217 /* Subroutine of md_apply_fix. Check to see if an immediate can be
18218 computed as two separate immediate values, added together. We
18219 already know that this value cannot be computed by just one ARM
18220 instruction. */
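/* For example, 0xFFF cannot be encoded as a single rotated 8-bit
   immediate, but it can be formed as 0xFF + 0xF00, both of which can be;
   the encoding of one part is returned and the other is stored through
   HIGHPART. */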
18221
18222 static unsigned int
18223 validate_immediate_twopart (unsigned int val,
18224 unsigned int * highpart)
18225 {
18226 unsigned int a;
18227 unsigned int i;
18228
18229 for (i = 0; i < 32; i += 2)
18230 if (((a = rotate_left (val, i)) & 0xff) != 0)
18231 {
18232 if (a & 0xff00)
18233 {
18234 if (a & ~ 0xffff)
18235 continue;
18236 * highpart = (a >> 8) | ((i + 24) << 7);
18237 }
18238 else if (a & 0xff0000)
18239 {
18240 if (a & 0xff000000)
18241 continue;
18242 * highpart = (a >> 16) | ((i + 16) << 7);
18243 }
18244 else
18245 {
18246 assert (a & 0xff000000);
18247 * highpart = (a >> 24) | ((i + 8) << 7);
18248 }
18249
18250 return (a & 0xff) | (i << 7);
18251 }
18252
18253 return FAIL;
18254 }
18255
18256 static int
18257 validate_offset_imm (unsigned int val, int hwse)
18258 {
18259 if ((hwse && val > 255) || val > 4095)
18260 return FAIL;
18261 return val;
18262 }
18263
18264 /* Subroutine of md_apply_fix. Do those data_ops which can take a
18265 negative immediate constant by altering the instruction. A bit of
18266 a hack really.
18267 MOV <-> MVN
18268 AND <-> BIC
18269 ADC <-> SBC
18270 by inverting the second operand, and
18271 ADD <-> SUB
18272 CMP <-> CMN
18273 by negating the second operand. */
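/* For instance, "mov r0, #0xffffffff" has no valid immediate encoding,
   but since ~0xffffffff == 0 it can be rewritten as "mvn r0, #0". */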
18274
18275 static int
18276 negate_data_op (unsigned long * instruction,
18277 unsigned long value)
18278 {
18279 int op, new_inst;
18280 unsigned long negated, inverted;
18281
18282 negated = encode_arm_immediate (-value);
18283 inverted = encode_arm_immediate (~value);
18284
18285 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
18286 switch (op)
18287 {
18288 /* First negates. */
18289 case OPCODE_SUB: /* ADD <-> SUB */
18290 new_inst = OPCODE_ADD;
18291 value = negated;
18292 break;
18293
18294 case OPCODE_ADD:
18295 new_inst = OPCODE_SUB;
18296 value = negated;
18297 break;
18298
18299 case OPCODE_CMP: /* CMP <-> CMN */
18300 new_inst = OPCODE_CMN;
18301 value = negated;
18302 break;
18303
18304 case OPCODE_CMN:
18305 new_inst = OPCODE_CMP;
18306 value = negated;
18307 break;
18308
18309 /* Now the inverted ops. */
18310 case OPCODE_MOV: /* MOV <-> MVN */
18311 new_inst = OPCODE_MVN;
18312 value = inverted;
18313 break;
18314
18315 case OPCODE_MVN:
18316 new_inst = OPCODE_MOV;
18317 value = inverted;
18318 break;
18319
18320 case OPCODE_AND: /* AND <-> BIC */
18321 new_inst = OPCODE_BIC;
18322 value = inverted;
18323 break;
18324
18325 case OPCODE_BIC:
18326 new_inst = OPCODE_AND;
18327 value = inverted;
18328 break;
18329
18330 case OPCODE_ADC: /* ADC <-> SBC */
18331 new_inst = OPCODE_SBC;
18332 value = inverted;
18333 break;
18334
18335 case OPCODE_SBC:
18336 new_inst = OPCODE_ADC;
18337 value = inverted;
18338 break;
18339
18340 /* We cannot do anything. */
18341 default:
18342 return FAIL;
18343 }
18344
18345 if (value == (unsigned) FAIL)
18346 return FAIL;
18347
18348 *instruction &= OPCODE_MASK;
18349 *instruction |= new_inst << DATA_OP_SHIFT;
18350 return value;
18351 }
18352
18353 /* Like negate_data_op, but for Thumb-2. */
18354
18355 static unsigned int
18356 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
18357 {
18358 int op, new_inst;
18359 int rd;
18360 unsigned int negated, inverted;
18361
18362 negated = encode_thumb32_immediate (-value);
18363 inverted = encode_thumb32_immediate (~value);
18364
18365 rd = (*instruction >> 8) & 0xf;
18366 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
18367 switch (op)
18368 {
18369 /* ADD <-> SUB. Includes CMP <-> CMN. */
18370 case T2_OPCODE_SUB:
18371 new_inst = T2_OPCODE_ADD;
18372 value = negated;
18373 break;
18374
18375 case T2_OPCODE_ADD:
18376 new_inst = T2_OPCODE_SUB;
18377 value = negated;
18378 break;
18379
18380 /* ORR <-> ORN. Includes MOV <-> MVN. */
18381 case T2_OPCODE_ORR:
18382 new_inst = T2_OPCODE_ORN;
18383 value = inverted;
18384 break;
18385
18386 case T2_OPCODE_ORN:
18387 new_inst = T2_OPCODE_ORR;
18388 value = inverted;
18389 break;
18390
18391 /* AND <-> BIC. TST has no inverted equivalent. */
18392 case T2_OPCODE_AND:
18393 new_inst = T2_OPCODE_BIC;
18394 if (rd == 15)
18395 value = FAIL;
18396 else
18397 value = inverted;
18398 break;
18399
18400 case T2_OPCODE_BIC:
18401 new_inst = T2_OPCODE_AND;
18402 value = inverted;
18403 break;
18404
18405 /* ADC <-> SBC */
18406 case T2_OPCODE_ADC:
18407 new_inst = T2_OPCODE_SBC;
18408 value = inverted;
18409 break;
18410
18411 case T2_OPCODE_SBC:
18412 new_inst = T2_OPCODE_ADC;
18413 value = inverted;
18414 break;
18415
18416 /* We cannot do anything. */
18417 default:
18418 return FAIL;
18419 }
18420
18421 if (value == (unsigned int)FAIL)
18422 return FAIL;
18423
18424 *instruction &= T2_OPCODE_MASK;
18425 *instruction |= new_inst << T2_DATA_OP_SHIFT;
18426 return value;
18427 }
18428
18429 /* Read a 32-bit thumb instruction from buf. */
18430 static unsigned long
18431 get_thumb32_insn (char * buf)
18432 {
18433 unsigned long insn;
18434 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
18435 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18436
18437 return insn;
18438 }
18439
18440
18441 /* We usually want to set the low bit on the address of thumb function
18442 symbols. In particular .word foo - . should have the low bit set.
18443 Generic code tries to fold the difference of two symbols to
18444 a constant. Prevent this and force a relocation when the first symbol
18445 is a Thumb function. */
18446 int
18447 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
18448 {
18449 if (op == O_subtract
18450 && l->X_op == O_symbol
18451 && r->X_op == O_symbol
18452 && THUMB_IS_FUNC (l->X_add_symbol))
18453 {
18454 l->X_op = O_subtract;
18455 l->X_op_symbol = r->X_add_symbol;
18456 l->X_add_number -= r->X_add_number;
18457 return 1;
18458 }
18459 /* Process as normal. */
18460 return 0;
18461 }
18462
18463 void
18464 md_apply_fix (fixS * fixP,
18465 valueT * valP,
18466 segT seg)
18467 {
18468 offsetT value = * valP;
18469 offsetT newval;
18470 unsigned int newimm;
18471 unsigned long temp;
18472 int sign;
18473 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
18474
18475 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
18476
18477 /* Note whether this will delete the relocation. */
18478
18479 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
18480 fixP->fx_done = 1;
18481
18482 /* On a 64-bit host, silently truncate 'value' to 32 bits for
18483 consistency with the behaviour on 32-bit hosts. Remember value
18484 for emit_reloc. */
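/* The AND truncates to 32 bits; the XOR/subtract pair then sign-extends
   bit 31 into the upper bits of offsetT. */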
18485 value &= 0xffffffff;
18486 value ^= 0x80000000;
18487 value -= 0x80000000;
18488
18489 *valP = value;
18490 fixP->fx_addnumber = value;
18491
18492 /* Same treatment for fixP->fx_offset. */
18493 fixP->fx_offset &= 0xffffffff;
18494 fixP->fx_offset ^= 0x80000000;
18495 fixP->fx_offset -= 0x80000000;
18496
18497 switch (fixP->fx_r_type)
18498 {
18499 case BFD_RELOC_NONE:
18500 /* This will need to go in the object file. */
18501 fixP->fx_done = 0;
18502 break;
18503
18504 case BFD_RELOC_ARM_IMMEDIATE:
18505 /* We claim that this fixup has been processed here,
18506 even if in fact we generate an error because we do
18507 not have a reloc for it, so tc_gen_reloc will reject it. */
18508 fixP->fx_done = 1;
18509
18510 if (fixP->fx_addsy
18511 && ! S_IS_DEFINED (fixP->fx_addsy))
18512 {
18513 as_bad_where (fixP->fx_file, fixP->fx_line,
18514 _("undefined symbol %s used as an immediate value"),
18515 S_GET_NAME (fixP->fx_addsy));
18516 break;
18517 }
18518
18519 newimm = encode_arm_immediate (value);
18520 temp = md_chars_to_number (buf, INSN_SIZE);
18521
18522 /* If the instruction will fail, see if we can fix things up by
18523 changing the opcode. */
18524 if (newimm == (unsigned int) FAIL
18525 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
18526 {
18527 as_bad_where (fixP->fx_file, fixP->fx_line,
18528 _("invalid constant (%lx) after fixup"),
18529 (unsigned long) value);
18530 break;
18531 }
18532
18533 newimm |= (temp & 0xfffff000);
18534 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18535 break;
18536
18537 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18538 {
18539 unsigned int highpart = 0;
18540 unsigned int newinsn = 0xe1a00000; /* nop. */
18541
18542 newimm = encode_arm_immediate (value);
18543 temp = md_chars_to_number (buf, INSN_SIZE);
18544
18545 /* If the instruction will fail, see if we can fix things up by
18546 changing the opcode. */
18547 if (newimm == (unsigned int) FAIL
18548 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
18549 {
18550 /* No ? OK - try using two ADD instructions to generate
18551 the value. */
18552 newimm = validate_immediate_twopart (value, & highpart);
18553
18554 /* Yes - then make sure that the second instruction is
18555 also an add. */
18556 if (newimm != (unsigned int) FAIL)
18557 newinsn = temp;
18558 /* Still No ? Try using a negated value. */
18559 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
18560 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
18561 /* Otherwise - give up. */
18562 else
18563 {
18564 as_bad_where (fixP->fx_file, fixP->fx_line,
18565 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
18566 (long) value);
18567 break;
18568 }
18569
18570 /* Replace the first operand in the 2nd instruction (which
18571 is the PC) with the destination register. We have
18572 already added in the PC in the first instruction and we
18573 do not want to do it again. */
18574 newinsn &= ~ 0xf0000;
18575 newinsn |= ((newinsn & 0x0f000) << 4);
18576 }
18577
18578 newimm |= (temp & 0xfffff000);
18579 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18580
18581 highpart |= (newinsn & 0xfffff000);
18582 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
18583 }
18584 break;
18585
18586 case BFD_RELOC_ARM_OFFSET_IMM:
18587 if (!fixP->fx_done && seg->use_rela_p)
18588 value = 0;
18589
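/* Fall through. */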
18590 case BFD_RELOC_ARM_LITERAL:
18591 sign = value >= 0;
18592
18593 if (value < 0)
18594 value = - value;
18595
18596 if (validate_offset_imm (value, 0) == FAIL)
18597 {
18598 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
18599 as_bad_where (fixP->fx_file, fixP->fx_line,
18600 _("invalid literal constant: pool needs to be closer"));
18601 else
18602 as_bad_where (fixP->fx_file, fixP->fx_line,
18603 _("bad immediate value for offset (%ld)"),
18604 (long) value);
18605 break;
18606 }
18607
18608 newval = md_chars_to_number (buf, INSN_SIZE);
18609 newval &= 0xff7ff000;
18610 newval |= value | (sign ? INDEX_UP : 0);
18611 md_number_to_chars (buf, newval, INSN_SIZE);
18612 break;
18613
18614 case BFD_RELOC_ARM_OFFSET_IMM8:
18615 case BFD_RELOC_ARM_HWLITERAL:
18616 sign = value >= 0;
18617
18618 if (value < 0)
18619 value = - value;
18620
18621 if (validate_offset_imm (value, 1) == FAIL)
18622 {
18623 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
18624 as_bad_where (fixP->fx_file, fixP->fx_line,
18625 _("invalid literal constant: pool needs to be closer"));
18626 else
18627 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18628 (long) value);
18629 break;
18630 }
18631
18632 newval = md_chars_to_number (buf, INSN_SIZE);
18633 newval &= 0xff7ff0f0;
18634 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
18635 md_number_to_chars (buf, newval, INSN_SIZE);
18636 break;
18637
18638 case BFD_RELOC_ARM_T32_OFFSET_U8:
18639 if (value < 0 || value > 1020 || value % 4 != 0)
18640 as_bad_where (fixP->fx_file, fixP->fx_line,
18641 _("bad immediate value for offset (%ld)"), (long) value);
18642 value /= 4;
18643
18644 newval = md_chars_to_number (buf+2, THUMB_SIZE);
18645 newval |= value;
18646 md_number_to_chars (buf+2, newval, THUMB_SIZE);
18647 break;
18648
18649 case BFD_RELOC_ARM_T32_OFFSET_IMM:
18650 /* This is a complicated relocation used for all varieties of Thumb32
18651 load/store instruction with immediate offset:
18652
18653 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
18654 *4, optional writeback(W)
18655 (doubleword load/store)
18656
18657 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
18658 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
18659 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
18660 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
18661 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
18662
18663 Uppercase letters indicate bits that are already encoded at
18664 this point. Lowercase letters are our problem. For the
18665 second block of instructions, the secondary opcode nybble
18666 (bits 8..11) is present, and bit 23 is zero, even if this is
18667 a PC-relative operation. */
18668 newval = md_chars_to_number (buf, THUMB_SIZE);
18669 newval <<= 16;
18670 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
18671
18672 if ((newval & 0xf0000000) == 0xe0000000)
18673 {
18674 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18675 if (value >= 0)
18676 newval |= (1 << 23);
18677 else
18678 value = -value;
18679 if (value % 4 != 0)
18680 {
18681 as_bad_where (fixP->fx_file, fixP->fx_line,
18682 _("offset not a multiple of 4"));
18683 break;
18684 }
18685 value /= 4;
18686 if (value > 0xff)
18687 {
18688 as_bad_where (fixP->fx_file, fixP->fx_line,
18689 _("offset out of range"));
18690 break;
18691 }
18692 newval &= ~0xff;
18693 }
18694 else if ((newval & 0x000f0000) == 0x000f0000)
18695 {
18696 /* PC-relative, 12-bit offset. */
18697 if (value >= 0)
18698 newval |= (1 << 23);
18699 else
18700 value = -value;
18701 if (value > 0xfff)
18702 {
18703 as_bad_where (fixP->fx_file, fixP->fx_line,
18704 _("offset out of range"));
18705 break;
18706 }
18707 newval &= ~0xfff;
18708 }
18709 else if ((newval & 0x00000100) == 0x00000100)
18710 {
18711 /* Writeback: 8-bit, +/- offset. */
18712 if (value >= 0)
18713 newval |= (1 << 9);
18714 else
18715 value = -value;
18716 if (value > 0xff)
18717 {
18718 as_bad_where (fixP->fx_file, fixP->fx_line,
18719 _("offset out of range"));
18720 break;
18721 }
18722 newval &= ~0xff;
18723 }
18724 else if ((newval & 0x00000f00) == 0x00000e00)
18725 {
18726 /* T-instruction: positive 8-bit offset. */
18727 if (value < 0 || value > 0xff)
18728 {
18729 as_bad_where (fixP->fx_file, fixP->fx_line,
18730 _("offset out of range"));
18731 break;
18732 }
18733 newval &= ~0xff;
18734 newval |= value;
18735 }
18736 else
18737 {
18738 /* Positive 12-bit or negative 8-bit offset. */
18739 int limit;
18740 if (value >= 0)
18741 {
18742 newval |= (1 << 23);
18743 limit = 0xfff;
18744 }
18745 else
18746 {
18747 value = -value;
18748 limit = 0xff;
18749 }
18750 if (value > limit)
18751 {
18752 as_bad_where (fixP->fx_file, fixP->fx_line,
18753 _("offset out of range"));
18754 break;
18755 }
18756 newval &= ~limit;
18757 }
18758
18759 newval |= value;
18760 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18761 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18762 break;
18763
18764 case BFD_RELOC_ARM_SHIFT_IMM:
18765 newval = md_chars_to_number (buf, INSN_SIZE);
18766 if (((unsigned long) value) > 32
18767 || (value == 32
18768 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18769 {
18770 as_bad_where (fixP->fx_file, fixP->fx_line,
18771 _("shift expression is too large"));
18772 break;
18773 }
18774
18775 if (value == 0)
18776 /* Shifts of zero must be done as lsl. */
18777 newval &= ~0x60;
18778 else if (value == 32)
18779 value = 0;
18780 newval &= 0xfffff07f;
18781 newval |= (value & 0x1f) << 7;
18782 md_number_to_chars (buf, newval, INSN_SIZE);
18783 break;
18784
18785 case BFD_RELOC_ARM_T32_IMMEDIATE:
18786 case BFD_RELOC_ARM_T32_ADD_IMM:
18787 case BFD_RELOC_ARM_T32_IMM12:
18788 case BFD_RELOC_ARM_T32_ADD_PC12:
18789 /* We claim that this fixup has been processed here,
18790 even if in fact we generate an error because we do
18791 not have a reloc for it, so tc_gen_reloc will reject it. */
18792 fixP->fx_done = 1;
18793
18794 if (fixP->fx_addsy
18795 && ! S_IS_DEFINED (fixP->fx_addsy))
18796 {
18797 as_bad_where (fixP->fx_file, fixP->fx_line,
18798 _("undefined symbol %s used as an immediate value"),
18799 S_GET_NAME (fixP->fx_addsy));
18800 break;
18801 }
18802
18803 newval = md_chars_to_number (buf, THUMB_SIZE);
18804 newval <<= 16;
18805 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18806
18807 newimm = FAIL;
18808 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18809 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18810 {
18811 newimm = encode_thumb32_immediate (value);
18812 if (newimm == (unsigned int) FAIL)
18813 newimm = thumb32_negate_data_op (&newval, value);
18814 }
18815 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18816 && newimm == (unsigned int) FAIL)
18817 {
18818 /* Turn add/sub into addw/subw. */
18819 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18820 newval = (newval & 0xfeffffff) | 0x02000000;
18821
18822 /* 12 bit immediate for addw/subw. */
18823 if (value < 0)
18824 {
18825 value = -value;
18826 newval ^= 0x00a00000;
18827 }
18828 if (value > 0xfff)
18829 newimm = (unsigned int) FAIL;
18830 else
18831 newimm = value;
18832 }
18833
18834 if (newimm == (unsigned int)FAIL)
18835 {
18836 as_bad_where (fixP->fx_file, fixP->fx_line,
18837 _("invalid constant (%lx) after fixup"),
18838 (unsigned long) value);
18839 break;
18840 }
18841
18842 newval |= (newimm & 0x800) << 15;
18843 newval |= (newimm & 0x700) << 4;
18844 newval |= (newimm & 0x0ff);
18845
18846 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18847 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18848 break;
18849
18850 case BFD_RELOC_ARM_SMC:
18851 if (((unsigned long) value) > 0xffff)
18852 as_bad_where (fixP->fx_file, fixP->fx_line,
18853 _("invalid smc expression"));
18854 newval = md_chars_to_number (buf, INSN_SIZE);
18855 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18856 md_number_to_chars (buf, newval, INSN_SIZE);
18857 break;
18858
18859 case BFD_RELOC_ARM_SWI:
18860 if (fixP->tc_fix_data != 0)
18861 {
18862 if (((unsigned long) value) > 0xff)
18863 as_bad_where (fixP->fx_file, fixP->fx_line,
18864 _("invalid swi expression"));
18865 newval = md_chars_to_number (buf, THUMB_SIZE);
18866 newval |= value;
18867 md_number_to_chars (buf, newval, THUMB_SIZE);
18868 }
18869 else
18870 {
18871 if (((unsigned long) value) > 0x00ffffff)
18872 as_bad_where (fixP->fx_file, fixP->fx_line,
18873 _("invalid swi expression"));
18874 newval = md_chars_to_number (buf, INSN_SIZE);
18875 newval |= value;
18876 md_number_to_chars (buf, newval, INSN_SIZE);
18877 }
18878 break;
18879
18880 case BFD_RELOC_ARM_MULTI:
18881 if (((unsigned long) value) > 0xffff)
18882 as_bad_where (fixP->fx_file, fixP->fx_line,
18883 _("invalid expression in load/store multiple"));
18884 newval = value | md_chars_to_number (buf, INSN_SIZE);
18885 md_number_to_chars (buf, newval, INSN_SIZE);
18886 break;
18887
18888 #ifdef OBJ_ELF
18889 case BFD_RELOC_ARM_PCREL_CALL:
18890 newval = md_chars_to_number (buf, INSN_SIZE);
18891 if ((newval & 0xf0000000) == 0xf0000000)
18892 temp = 1;
18893 else
18894 temp = 3;
18895 goto arm_branch_common;
18896
18897 case BFD_RELOC_ARM_PCREL_JUMP:
18898 case BFD_RELOC_ARM_PLT32:
18899 #endif
18900 case BFD_RELOC_ARM_PCREL_BRANCH:
18901 temp = 3;
18902 goto arm_branch_common;
18903
18904 case BFD_RELOC_ARM_PCREL_BLX:
18905 temp = 1;
18906 arm_branch_common:
18907 /* We are going to store value (shifted right by two) in the
18908 instruction, in a 24-bit signed field. Bits 26 through 32 must be
18909 either all clear or all set, and bit 0 must be clear. For B/BL,
18910 bit 1 must also be clear. */
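/* (So the encodable range is +/-32MB: a 24-bit signed word offset,
   i.e. +/-2^25 bytes.) */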
18911 if (value & temp)
18912 as_bad_where (fixP->fx_file, fixP->fx_line,
18913 _("misaligned branch destination"));
18914 if ((value & (offsetT)0xfe000000) != (offsetT)0
18915 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18916 as_bad_where (fixP->fx_file, fixP->fx_line,
18917 _("branch out of range"));
18918
18919 if (fixP->fx_done || !seg->use_rela_p)
18920 {
18921 newval = md_chars_to_number (buf, INSN_SIZE);
18922 newval |= (value >> 2) & 0x00ffffff;
18923 /* Set the H bit on BLX instructions. */
18924 if (temp == 1)
18925 {
18926 if (value & 2)
18927 newval |= 0x01000000;
18928 else
18929 newval &= ~0x01000000;
18930 }
18931 md_number_to_chars (buf, newval, INSN_SIZE);
18932 }
18933 break;
18934
18935 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18936 /* CBZ can only branch forward. */
18937
18938 /* Attempts to use CBZ to branch to the next instruction
18939 (which, strictly speaking, are prohibited) will be turned into
18940 no-ops.
18941
18942 FIXME: It may be better to remove the instruction completely and
18943 perform relaxation. */
18944 if (value == -2)
18945 {
18946 newval = md_chars_to_number (buf, THUMB_SIZE);
18947 newval = 0xbf00; /* NOP encoding T1 */
18948 md_number_to_chars (buf, newval, THUMB_SIZE);
18949 }
18950 else
18951 {
18952 if (value & ~0x7e)
18953 as_bad_where (fixP->fx_file, fixP->fx_line,
18954 _("branch out of range"));
18955
18956 if (fixP->fx_done || !seg->use_rela_p)
18957 {
18958 newval = md_chars_to_number (buf, THUMB_SIZE);
18959 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18960 md_number_to_chars (buf, newval, THUMB_SIZE);
18961 }
18962 }
18963 break;
18964
18965 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18966 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18967 as_bad_where (fixP->fx_file, fixP->fx_line,
18968 _("branch out of range"));
18969
18970 if (fixP->fx_done || !seg->use_rela_p)
18971 {
18972 newval = md_chars_to_number (buf, THUMB_SIZE);
18973 newval |= (value & 0x1ff) >> 1;
18974 md_number_to_chars (buf, newval, THUMB_SIZE);
18975 }
18976 break;
18977
18978 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18979 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18980 as_bad_where (fixP->fx_file, fixP->fx_line,
18981 _("branch out of range"));
18982
18983 if (fixP->fx_done || !seg->use_rela_p)
18984 {
18985 newval = md_chars_to_number (buf, THUMB_SIZE);
18986 newval |= (value & 0xfff) >> 1;
18987 md_number_to_chars (buf, newval, THUMB_SIZE);
18988 }
18989 break;
18990
18991 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18992 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18993 as_bad_where (fixP->fx_file, fixP->fx_line,
18994 _("conditional branch out of range"));
18995
18996 if (fixP->fx_done || !seg->use_rela_p)
18997 {
18998 offsetT newval2;
18999 addressT S, J1, J2, lo, hi;
19000
19001 S = (value & 0x00100000) >> 20;
19002 J2 = (value & 0x00080000) >> 19;
19003 J1 = (value & 0x00040000) >> 18;
19004 hi = (value & 0x0003f000) >> 12;
19005 lo = (value & 0x00000ffe) >> 1;
19006
19007 newval = md_chars_to_number (buf, THUMB_SIZE);
19008 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19009 newval |= (S << 10) | hi;
19010 newval2 |= (J1 << 13) | (J2 << 11) | lo;
19011 md_number_to_chars (buf, newval, THUMB_SIZE);
19012 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19013 }
19014 break;
19015
19016 case BFD_RELOC_THUMB_PCREL_BLX:
19017 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19018 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
19019 as_bad_where (fixP->fx_file, fixP->fx_line,
19020 _("branch out of range"));
19021
19022 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
19023 /* For a BLX instruction, make sure that the relocation is rounded up
19024 to a word boundary. This follows the semantics of the instruction
19025 which specifies that bit 1 of the target address will come from bit
19026 1 of the base address. */
19027 value = (value + 1) & ~ 1;
19028
19029 if (fixP->fx_done || !seg->use_rela_p)
19030 {
19031 offsetT newval2;
19032
19033 newval = md_chars_to_number (buf, THUMB_SIZE);
19034 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19035 newval |= (value & 0x7fffff) >> 12;
19036 newval2 |= (value & 0xfff) >> 1;
19037 md_number_to_chars (buf, newval, THUMB_SIZE);
19038 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19039 }
19040 break;
19041
19042 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19043 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
19044 as_bad_where (fixP->fx_file, fixP->fx_line,
19045 _("branch out of range"));
19046
19047 if (fixP->fx_done || !seg->use_rela_p)
19048 {
19049 offsetT newval2;
19050 addressT S, I1, I2, lo, hi;
19051
19052 S = (value & 0x01000000) >> 24;
19053 I1 = (value & 0x00800000) >> 23;
19054 I2 = (value & 0x00400000) >> 22;
19055 hi = (value & 0x003ff000) >> 12;
19056 lo = (value & 0x00000ffe) >> 1;
19057
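/* Convert to the J1/J2 bits actually stored in the instruction: in the
   Thumb-2 branch encoding, Jn = NOT(In EOR S). */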
19058 I1 = !(I1 ^ S);
19059 I2 = !(I2 ^ S);
19060
19061 newval = md_chars_to_number (buf, THUMB_SIZE);
19062 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19063 newval |= (S << 10) | hi;
19064 newval2 |= (I1 << 13) | (I2 << 11) | lo;
19065 md_number_to_chars (buf, newval, THUMB_SIZE);
19066 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19067 }
19068 break;
19069
19070 case BFD_RELOC_8:
19071 if (fixP->fx_done || !seg->use_rela_p)
19072 md_number_to_chars (buf, value, 1);
19073 break;
19074
19075 case BFD_RELOC_16:
19076 if (fixP->fx_done || !seg->use_rela_p)
19077 md_number_to_chars (buf, value, 2);
19078 break;
19079
19080 #ifdef OBJ_ELF
19081 case BFD_RELOC_ARM_TLS_GD32:
19082 case BFD_RELOC_ARM_TLS_LE32:
19083 case BFD_RELOC_ARM_TLS_IE32:
19084 case BFD_RELOC_ARM_TLS_LDM32:
19085 case BFD_RELOC_ARM_TLS_LDO32:
19086 S_SET_THREAD_LOCAL (fixP->fx_addsy);
19087 /* fall through */
19088
19089 case BFD_RELOC_ARM_GOT32:
19090 case BFD_RELOC_ARM_GOTOFF:
19091 case BFD_RELOC_ARM_TARGET2:
19092 if (fixP->fx_done || !seg->use_rela_p)
19093 md_number_to_chars (buf, 0, 4);
19094 break;
19095 #endif
19096
19097 case BFD_RELOC_RVA:
19098 case BFD_RELOC_32:
19099 case BFD_RELOC_ARM_TARGET1:
19100 case BFD_RELOC_ARM_ROSEGREL32:
19101 case BFD_RELOC_ARM_SBREL32:
19102 case BFD_RELOC_32_PCREL:
19103 #ifdef TE_PE
19104 case BFD_RELOC_32_SECREL:
19105 #endif
19106 if (fixP->fx_done || !seg->use_rela_p)
19107 #ifdef TE_WINCE
19108 /* For WinCE we only do this for pcrel fixups. */
19109 if (fixP->fx_done || fixP->fx_pcrel)
19110 #endif
19111 md_number_to_chars (buf, value, 4);
19112 break;
19113
19114 #ifdef OBJ_ELF
19115 case BFD_RELOC_ARM_PREL31:
19116 if (fixP->fx_done || !seg->use_rela_p)
19117 {
19118 newval = md_chars_to_number (buf, 4) & 0x80000000;
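/* Overflow if bits 30 and 31 of the value differ, i.e. if the value
   does not fit in a sign-extended 31-bit field. */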
19119 if ((value ^ (value >> 1)) & 0x40000000)
19120 {
19121 as_bad_where (fixP->fx_file, fixP->fx_line,
19122 _("rel31 relocation overflow"));
19123 }
19124 newval |= value & 0x7fffffff;
19125 md_number_to_chars (buf, newval, 4);
19126 }
19127 break;
19128 #endif
19129
19130 case BFD_RELOC_ARM_CP_OFF_IMM:
19131 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
19132 if (value < -1023 || value > 1023 || (value & 3))
19133 as_bad_where (fixP->fx_file, fixP->fx_line,
19134 _("co-processor offset out of range"));
19135 cp_off_common:
19136 sign = value >= 0;
19137 if (value < 0)
19138 value = -value;
19139 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19140 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19141 newval = md_chars_to_number (buf, INSN_SIZE);
19142 else
19143 newval = get_thumb32_insn (buf);
19144 newval &= 0xff7fff00;
19145 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
19146 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19147 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19148 md_number_to_chars (buf, newval, INSN_SIZE);
19149 else
19150 put_thumb32_insn (buf, newval);
19151 break;
19152
19153 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
19154 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
19155 if (value < -255 || value > 255)
19156 as_bad_where (fixP->fx_file, fixP->fx_line,
19157 _("co-processor offset out of range"));
19158 value *= 4;
19159 goto cp_off_common;
19160
19161 case BFD_RELOC_ARM_THUMB_OFFSET:
19162 newval = md_chars_to_number (buf, THUMB_SIZE);
19163 /* Exactly what range applies, and where the offset is inserted,
19164 depends on the type of instruction; we can establish this from the
19165 top 4 bits. */
19166 switch (newval >> 12)
19167 {
19168 case 4: /* PC load. */
19169 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
19170 forced to zero for these loads; md_pcrel_from has already
19171 compensated for this. */
19172 if (value & 3)
19173 as_bad_where (fixP->fx_file, fixP->fx_line,
19174 _("invalid offset, target not word aligned (0x%08lX)"),
19175 (((unsigned long) fixP->fx_frag->fr_address
19176 + (unsigned long) fixP->fx_where) & ~3)
19177 + (unsigned long) value);
19178
19179 if (value & ~0x3fc)
19180 as_bad_where (fixP->fx_file, fixP->fx_line,
19181 _("invalid offset, value too big (0x%08lX)"),
19182 (long) value);
19183
19184 newval |= value >> 2;
19185 break;
19186
19187 case 9: /* SP load/store. */
19188 if (value & ~0x3fc)
19189 as_bad_where (fixP->fx_file, fixP->fx_line,
19190 _("invalid offset, value too big (0x%08lX)"),
19191 (long) value);
19192 newval |= value >> 2;
19193 break;
19194
19195 case 6: /* Word load/store. */
19196 if (value & ~0x7c)
19197 as_bad_where (fixP->fx_file, fixP->fx_line,
19198 _("invalid offset, value too big (0x%08lX)"),
19199 (long) value);
19200 newval |= value << 4; /* 6 - 2. */
19201 break;
19202
19203 case 7: /* Byte load/store. */
19204 if (value & ~0x1f)
19205 as_bad_where (fixP->fx_file, fixP->fx_line,
19206 _("invalid offset, value too big (0x%08lX)"),
19207 (long) value);
19208 newval |= value << 6;
19209 break;
19210
19211 case 8: /* Halfword load/store. */
19212 if (value & ~0x3e)
19213 as_bad_where (fixP->fx_file, fixP->fx_line,
19214 _("invalid offset, value too big (0x%08lX)"),
19215 (long) value);
19216 newval |= value << 5; /* 6 - 1. */
19217 break;
19218
19219 default:
19220 as_bad_where (fixP->fx_file, fixP->fx_line,
19221 "Unable to process relocation for thumb opcode: %lx",
19222 (unsigned long) newval);
19223 break;
19224 }
19225 md_number_to_chars (buf, newval, THUMB_SIZE);
19226 break;
19227
19228 case BFD_RELOC_ARM_THUMB_ADD:
19229 /* This is a complicated relocation, since we use it for all of
19230 the following immediate relocations:
19231
19232 3bit ADD/SUB
19233 8bit ADD/SUB
19234 9bit ADD/SUB SP word-aligned
19235 10bit ADD PC/SP word-aligned
19236
19237 The type of instruction being processed is encoded in the
19238 instruction field:
19239
19240 0x8000 SUB
19241 0x00F0 Rd
19242 0x000F Rs
19243 */
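/* For example, "add r1, r2, #imm" arrives here with Rd = 1 in bits 4..7,
   Rs = 2 in bits 0..3 and the SUB bit clear. */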
19244 newval = md_chars_to_number (buf, THUMB_SIZE);
19245 {
19246 int rd = (newval >> 4) & 0xf;
19247 int rs = newval & 0xf;
19248 int subtract = !!(newval & 0x8000);
19249
19250 /* Check for HI regs, only very restricted cases allowed:
19251 Adjusting SP, and using PC or SP to get an address. */
19252 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
19253 || (rs > 7 && rs != REG_SP && rs != REG_PC))
19254 as_bad_where (fixP->fx_file, fixP->fx_line,
19255 _("invalid Hi register with immediate"));
19256
19257 /* If value is negative, choose the opposite instruction. */
19258 if (value < 0)
19259 {
19260 value = -value;
19261 subtract = !subtract;
19262 if (value < 0)
19263 as_bad_where (fixP->fx_file, fixP->fx_line,
19264 _("immediate value out of range"));
19265 }
19266
19267 if (rd == REG_SP)
19268 {
19269 if (value & ~0x1fc)
19270 as_bad_where (fixP->fx_file, fixP->fx_line,
19271 _("invalid immediate for stack address calculation"));
19272 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
19273 newval |= value >> 2;
19274 }
19275 else if (rs == REG_PC || rs == REG_SP)
19276 {
19277 if (subtract || value & ~0x3fc)
19278 as_bad_where (fixP->fx_file, fixP->fx_line,
19279 _("invalid immediate for address calculation (value = 0x%08lX)"),
19280 (unsigned long) value);
19281 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
19282 newval |= rd << 8;
19283 newval |= value >> 2;
19284 }
19285 else if (rs == rd)
19286 {
19287 if (value & ~0xff)
19288 as_bad_where (fixP->fx_file, fixP->fx_line,
19289 _("immediate value out of range"));
19290 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
19291 newval |= (rd << 8) | value;
19292 }
19293 else
19294 {
19295 if (value & ~0x7)
19296 as_bad_where (fixP->fx_file, fixP->fx_line,
19297 _("immediate value out of range"));
19298 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
19299 newval |= rd | (rs << 3) | (value << 6);
19300 }
19301 }
19302 md_number_to_chars (buf, newval, THUMB_SIZE);
19303 break;
19304
19305 case BFD_RELOC_ARM_THUMB_IMM:
19306 newval = md_chars_to_number (buf, THUMB_SIZE);
19307 if (value < 0 || value > 255)
19308 as_bad_where (fixP->fx_file, fixP->fx_line,
19309 _("invalid immediate: %ld is out of range"),
19310 (long) value);
19311 newval |= value;
19312 md_number_to_chars (buf, newval, THUMB_SIZE);
19313 break;
19314
19315 case BFD_RELOC_ARM_THUMB_SHIFT:
19316 /* 5bit shift value (0..32). LSL cannot take 32. */
19317 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
19318 temp = newval & 0xf800;
19319 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
19320 as_bad_where (fixP->fx_file, fixP->fx_line,
19321 _("invalid shift value: %ld"), (long) value);
19322 /* Shifts of zero must be encoded as LSL. */
19323 if (value == 0)
19324 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
19325 /* Shifts of 32 are encoded as zero. */
19326 else if (value == 32)
19327 value = 0;
19328 newval |= value << 6;
19329 md_number_to_chars (buf, newval, THUMB_SIZE);
19330 break;
19331
19332 case BFD_RELOC_VTABLE_INHERIT:
19333 case BFD_RELOC_VTABLE_ENTRY:
19334 fixP->fx_done = 0;
19335 return;
19336
19337 case BFD_RELOC_ARM_MOVW:
19338 case BFD_RELOC_ARM_MOVT:
19339 case BFD_RELOC_ARM_THUMB_MOVW:
19340 case BFD_RELOC_ARM_THUMB_MOVT:
19341 if (fixP->fx_done || !seg->use_rela_p)
19342 {
19343 /* REL format relocations are limited to a 16-bit addend. */
19344 if (!fixP->fx_done)
19345 {
19346 if (value < -0x8000 || value > 0x7fff)
19347 as_bad_where (fixP->fx_file, fixP->fx_line,
19348 _("offset out of range"));
19349 }
19350 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19351 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19352 {
19353 value >>= 16;
19354 }
19355
19356 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19357 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19358 {
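/* Thumb-2 MOVW/MOVT scatter the 16-bit value across the encoding as
   imm4:i:imm3:imm8 (bits 19:16, 26, 14:12 and 7:0 of the combined
   32-bit instruction); the shifts below reproduce that layout. */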
19359 newval = get_thumb32_insn (buf);
19360 newval &= 0xfbf08f00;
19361 newval |= (value & 0xf000) << 4;
19362 newval |= (value & 0x0800) << 15;
19363 newval |= (value & 0x0700) << 4;
19364 newval |= (value & 0x00ff);
19365 put_thumb32_insn (buf, newval);
19366 }
19367 else
19368 {
19369 newval = md_chars_to_number (buf, 4);
19370 newval &= 0xfff0f000;
19371 newval |= value & 0x0fff;
19372 newval |= (value & 0xf000) << 4;
19373 md_number_to_chars (buf, newval, 4);
19374 }
19375 }
19376 return;
19377
19378 case BFD_RELOC_ARM_ALU_PC_G0_NC:
19379 case BFD_RELOC_ARM_ALU_PC_G0:
19380 case BFD_RELOC_ARM_ALU_PC_G1_NC:
19381 case BFD_RELOC_ARM_ALU_PC_G1:
19382 case BFD_RELOC_ARM_ALU_PC_G2:
19383 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19384 case BFD_RELOC_ARM_ALU_SB_G0:
19385 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19386 case BFD_RELOC_ARM_ALU_SB_G1:
19387 case BFD_RELOC_ARM_ALU_SB_G2:
19388 assert (!fixP->fx_done);
19389 if (!seg->use_rela_p)
19390 {
19391 bfd_vma insn;
19392 bfd_vma encoded_addend;
19393 bfd_vma addend_abs = abs (value);
19394
19395 /* Check that the absolute value of the addend can be
19396 expressed as an 8-bit constant plus a rotation. */
19397 encoded_addend = encode_arm_immediate (addend_abs);
19398 if (encoded_addend == (unsigned int) FAIL)
19399 as_bad_where (fixP->fx_file, fixP->fx_line,
19400 _("the offset 0x%08lX is not representable"),
19401 (unsigned long) addend_abs);
19402
19403 /* Extract the instruction. */
19404 insn = md_chars_to_number (buf, INSN_SIZE);
19405
19406 /* If the addend is positive, use an ADD instruction.
19407 Otherwise use a SUB. Take care not to destroy the S bit. */
19408 insn &= 0xff1fffff;
19409 if (value < 0)
19410 insn |= 1 << 22;
19411 else
19412 insn |= 1 << 23;
19413
19414 /* Place the encoded addend into the first 12 bits of the
19415 instruction. */
19416 insn &= 0xfffff000;
19417 insn |= encoded_addend;
19418
19419 /* Update the instruction. */
19420 md_number_to_chars (buf, insn, INSN_SIZE);
19421 }
19422 break;
19423
19424 case BFD_RELOC_ARM_LDR_PC_G0:
19425 case BFD_RELOC_ARM_LDR_PC_G1:
19426 case BFD_RELOC_ARM_LDR_PC_G2:
19427 case BFD_RELOC_ARM_LDR_SB_G0:
19428 case BFD_RELOC_ARM_LDR_SB_G1:
19429 case BFD_RELOC_ARM_LDR_SB_G2:
19430 assert (!fixP->fx_done);
19431 if (!seg->use_rela_p)
19432 {
19433 bfd_vma insn;
19434 bfd_vma addend_abs = abs (value);
19435
19436 /* Check that the absolute value of the addend can be
19437 encoded in 12 bits. */
19438 if (addend_abs >= 0x1000)
19439 as_bad_where (fixP->fx_file, fixP->fx_line,
19440 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
19441 (unsigned long) addend_abs);
19442
19443 /* Extract the instruction. */
19444 insn = md_chars_to_number (buf, INSN_SIZE);
19445
19446 /* If the addend is negative, clear bit 23 of the instruction.
19447 Otherwise set it. */
19448 if (value < 0)
19449 insn &= ~(1 << 23);
19450 else
19451 insn |= 1 << 23;
19452
19453 /* Place the absolute value of the addend into the first 12 bits
19454 of the instruction. */
19455 insn &= 0xfffff000;
19456 insn |= addend_abs;
19457
19458 /* Update the instruction. */
19459 md_number_to_chars (buf, insn, INSN_SIZE);
19460 }
19461 break;
19462
19463 case BFD_RELOC_ARM_LDRS_PC_G0:
19464 case BFD_RELOC_ARM_LDRS_PC_G1:
19465 case BFD_RELOC_ARM_LDRS_PC_G2:
19466 case BFD_RELOC_ARM_LDRS_SB_G0:
19467 case BFD_RELOC_ARM_LDRS_SB_G1:
19468 case BFD_RELOC_ARM_LDRS_SB_G2:
19469 assert (!fixP->fx_done);
19470 if (!seg->use_rela_p)
19471 {
19472 bfd_vma insn;
19473 bfd_vma addend_abs = abs (value);
19474
19475 /* Check that the absolute value of the addend can be
19476 encoded in 8 bits. */
19477 if (addend_abs >= 0x100)
19478 as_bad_where (fixP->fx_file, fixP->fx_line,
19479 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
19480 (unsigned long) addend_abs);
19481
19482 /* Extract the instruction. */
19483 insn = md_chars_to_number (buf, INSN_SIZE);
19484
19485 /* If the addend is negative, clear bit 23 of the instruction.
19486 Otherwise set it. */
19487 if (value < 0)
19488 insn &= ~(1 << 23);
19489 else
19490 insn |= 1 << 23;
19491
19492 /* Place the first four bits of the absolute value of the addend
19493 into the first 4 bits of the instruction, and the remaining
19494 four into bits 8 .. 11. */
19495 insn &= 0xfffff0f0;
19496 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
19497
19498 /* Update the instruction. */
19499 md_number_to_chars (buf, insn, INSN_SIZE);
19500 }
19501 break;
19502
19503 case BFD_RELOC_ARM_LDC_PC_G0:
19504 case BFD_RELOC_ARM_LDC_PC_G1:
19505 case BFD_RELOC_ARM_LDC_PC_G2:
19506 case BFD_RELOC_ARM_LDC_SB_G0:
19507 case BFD_RELOC_ARM_LDC_SB_G1:
19508 case BFD_RELOC_ARM_LDC_SB_G2:
19509 assert (!fixP->fx_done);
19510 if (!seg->use_rela_p)
19511 {
19512 bfd_vma insn;
19513 bfd_vma addend_abs = abs (value);
19514
19515 /* Check that the absolute value of the addend is a multiple of
19516 four and, when divided by four, fits in 8 bits. */
19517 if (addend_abs & 0x3)
19518 as_bad_where (fixP->fx_file, fixP->fx_line,
19519 _("bad offset 0x%08lX (must be word-aligned)"),
19520 (unsigned long) addend_abs);
19521
19522 if ((addend_abs >> 2) > 0xff)
19523 as_bad_where (fixP->fx_file, fixP->fx_line,
19524 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
19525 (unsigned long) addend_abs);
19526
19527 /* Extract the instruction. */
19528 insn = md_chars_to_number (buf, INSN_SIZE);
19529
19530 /* If the addend is negative, clear bit 23 of the instruction.
19531 Otherwise set it. */
19532 if (value < 0)
19533 insn &= ~(1 << 23);
19534 else
19535 insn |= 1 << 23;
19536
19537 /* Place the addend (divided by four) into the first eight
19538 bits of the instruction. */
19539 insn &= 0xfffffff0;
19540 insn |= addend_abs >> 2;
19541
19542 /* Update the instruction. */
19543 md_number_to_chars (buf, insn, INSN_SIZE);
19544 }
19545 break;
19546
19547 case BFD_RELOC_ARM_V4BX:
19548 /* This will need to go in the object file. */
19549 fixP->fx_done = 0;
19550 break;
19551
19552 case BFD_RELOC_UNUSED:
19553 default:
19554 as_bad_where (fixP->fx_file, fixP->fx_line,
19555 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
19556 }
19557 }
19558
19559 /* Translate internal representation of relocation info to BFD target
19560 format. */
19561
19562 arelent *
19563 tc_gen_reloc (asection *section, fixS *fixp)
19564 {
19565 arelent * reloc;
19566 bfd_reloc_code_real_type code;
19567
19568 reloc = xmalloc (sizeof (arelent));
19569
19570 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
19571 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
19572 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
19573
19574 if (fixp->fx_pcrel)
19575 {
19576 if (section->use_rela_p)
19577 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
19578 else
19579 fixp->fx_offset = reloc->address;
19580 }
19581 reloc->addend = fixp->fx_offset;
19582
19583 switch (fixp->fx_r_type)
19584 {
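/* The cases below that only set CODE when the fixup is PC-relative fall
   through, so a non-PC-relative fixup ends up in the group further down
   that simply copies fx_r_type. */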
19585 case BFD_RELOC_8:
19586 if (fixp->fx_pcrel)
19587 {
19588 code = BFD_RELOC_8_PCREL;
19589 break;
19590 }
19591
19592 case BFD_RELOC_16:
19593 if (fixp->fx_pcrel)
19594 {
19595 code = BFD_RELOC_16_PCREL;
19596 break;
19597 }
19598
19599 case BFD_RELOC_32:
19600 if (fixp->fx_pcrel)
19601 {
19602 code = BFD_RELOC_32_PCREL;
19603 break;
19604 }
19605
19606 case BFD_RELOC_ARM_MOVW:
19607 if (fixp->fx_pcrel)
19608 {
19609 code = BFD_RELOC_ARM_MOVW_PCREL;
19610 break;
19611 }
19612
19613 case BFD_RELOC_ARM_MOVT:
19614 if (fixp->fx_pcrel)
19615 {
19616 code = BFD_RELOC_ARM_MOVT_PCREL;
19617 break;
19618 }
19619
19620 case BFD_RELOC_ARM_THUMB_MOVW:
19621 if (fixp->fx_pcrel)
19622 {
19623 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
19624 break;
19625 }
19626
19627 case BFD_RELOC_ARM_THUMB_MOVT:
19628 if (fixp->fx_pcrel)
19629 {
19630 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
19631 break;
19632 }
19633
19634 case BFD_RELOC_NONE:
19635 case BFD_RELOC_ARM_PCREL_BRANCH:
19636 case BFD_RELOC_ARM_PCREL_BLX:
19637 case BFD_RELOC_RVA:
19638 case BFD_RELOC_THUMB_PCREL_BRANCH7:
19639 case BFD_RELOC_THUMB_PCREL_BRANCH9:
19640 case BFD_RELOC_THUMB_PCREL_BRANCH12:
19641 case BFD_RELOC_THUMB_PCREL_BRANCH20:
19642 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19643 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19644 case BFD_RELOC_THUMB_PCREL_BLX:
19645 case BFD_RELOC_VTABLE_ENTRY:
19646 case BFD_RELOC_VTABLE_INHERIT:
19647 #ifdef TE_PE
19648 case BFD_RELOC_32_SECREL:
19649 #endif
19650 code = fixp->fx_r_type;
19651 break;
19652
19653 case BFD_RELOC_ARM_LITERAL:
19654 case BFD_RELOC_ARM_HWLITERAL:
19655 /* If this is called then a literal has
19656 been referenced across a section boundary. */
19657 as_bad_where (fixp->fx_file, fixp->fx_line,
19658 _("literal referenced across section boundary"));
19659 return NULL;
19660
19661 #ifdef OBJ_ELF
19662 case BFD_RELOC_ARM_GOT32:
19663 case BFD_RELOC_ARM_GOTOFF:
19664 case BFD_RELOC_ARM_PLT32:
19665 case BFD_RELOC_ARM_TARGET1:
19666 case BFD_RELOC_ARM_ROSEGREL32:
19667 case BFD_RELOC_ARM_SBREL32:
19668 case BFD_RELOC_ARM_PREL31:
19669 case BFD_RELOC_ARM_TARGET2:
19670 case BFD_RELOC_ARM_TLS_LE32:
19671 case BFD_RELOC_ARM_TLS_LDO32:
19672 case BFD_RELOC_ARM_PCREL_CALL:
19673 case BFD_RELOC_ARM_PCREL_JUMP:
19674 case BFD_RELOC_ARM_ALU_PC_G0_NC:
19675 case BFD_RELOC_ARM_ALU_PC_G0:
19676 case BFD_RELOC_ARM_ALU_PC_G1_NC:
19677 case BFD_RELOC_ARM_ALU_PC_G1:
19678 case BFD_RELOC_ARM_ALU_PC_G2:
19679 case BFD_RELOC_ARM_LDR_PC_G0:
19680 case BFD_RELOC_ARM_LDR_PC_G1:
19681 case BFD_RELOC_ARM_LDR_PC_G2:
19682 case BFD_RELOC_ARM_LDRS_PC_G0:
19683 case BFD_RELOC_ARM_LDRS_PC_G1:
19684 case BFD_RELOC_ARM_LDRS_PC_G2:
19685 case BFD_RELOC_ARM_LDC_PC_G0:
19686 case BFD_RELOC_ARM_LDC_PC_G1:
19687 case BFD_RELOC_ARM_LDC_PC_G2:
19688 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19689 case BFD_RELOC_ARM_ALU_SB_G0:
19690 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19691 case BFD_RELOC_ARM_ALU_SB_G1:
19692 case BFD_RELOC_ARM_ALU_SB_G2:
19693 case BFD_RELOC_ARM_LDR_SB_G0:
19694 case BFD_RELOC_ARM_LDR_SB_G1:
19695 case BFD_RELOC_ARM_LDR_SB_G2:
19696 case BFD_RELOC_ARM_LDRS_SB_G0:
19697 case BFD_RELOC_ARM_LDRS_SB_G1:
19698 case BFD_RELOC_ARM_LDRS_SB_G2:
19699 case BFD_RELOC_ARM_LDC_SB_G0:
19700 case BFD_RELOC_ARM_LDC_SB_G1:
19701 case BFD_RELOC_ARM_LDC_SB_G2:
19702 case BFD_RELOC_ARM_V4BX:
19703 code = fixp->fx_r_type;
19704 break;
19705
19706 case BFD_RELOC_ARM_TLS_GD32:
19707 case BFD_RELOC_ARM_TLS_IE32:
19708 case BFD_RELOC_ARM_TLS_LDM32:
19709 /* BFD will include the symbol's address in the addend.
19710 But we don't want that, so subtract it out again here. */
19711 if (!S_IS_COMMON (fixp->fx_addsy))
19712 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19713 code = fixp->fx_r_type;
19714 break;
19715 #endif
19716
19717 case BFD_RELOC_ARM_IMMEDIATE:
19718 as_bad_where (fixp->fx_file, fixp->fx_line,
19719 _("internal relocation (type: IMMEDIATE) not fixed up"));
19720 return NULL;
19721
19722 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19723 as_bad_where (fixp->fx_file, fixp->fx_line,
19724 _("ADRL used for a symbol not defined in the same file"));
19725 return NULL;
19726
19727 case BFD_RELOC_ARM_OFFSET_IMM:
19728 if (section->use_rela_p)
19729 {
19730 code = fixp->fx_r_type;
19731 break;
19732 }
19733
19734 if (fixp->fx_addsy != NULL
19735 && !S_IS_DEFINED (fixp->fx_addsy)
19736 && S_IS_LOCAL (fixp->fx_addsy))
19737 {
19738 as_bad_where (fixp->fx_file, fixp->fx_line,
19739 _("undefined local label `%s'"),
19740 S_GET_NAME (fixp->fx_addsy));
19741 return NULL;
19742 }
19743
19744 as_bad_where (fixp->fx_file, fixp->fx_line,
19745 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19746 return NULL;
19747
19748 default:
19749 {
19750 char * type;
19751
19752 switch (fixp->fx_r_type)
19753 {
19754 case BFD_RELOC_NONE: type = "NONE"; break;
19755 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19756 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19757 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19758 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19759 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19760 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19761 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19762 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19763 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19764 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19765 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19766 default: type = _("<unknown>"); break;
19767 }
19768 as_bad_where (fixp->fx_file, fixp->fx_line,
19769 _("cannot represent %s relocation in this object file format"),
19770 type);
19771 return NULL;
19772 }
19773 }
19774
19775 #ifdef OBJ_ELF
19776 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19777 && GOT_symbol
19778 && fixp->fx_addsy == GOT_symbol)
19779 {
19780 code = BFD_RELOC_ARM_GOTPC;
19781 reloc->addend = fixp->fx_offset = reloc->address;
19782 }
19783 #endif
19784
19785 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19786
19787 if (reloc->howto == NULL)
19788 {
19789 as_bad_where (fixp->fx_file, fixp->fx_line,
19790 _("cannot represent %s relocation in this object file format"),
19791 bfd_get_reloc_code_name (code));
19792 return NULL;
19793 }
19794
19795 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19796 vtable entry to be used in the relocation's section offset. */
19797 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19798 reloc->address = fixp->fx_offset;
19799
19800 return reloc;
19801 }
19802
19803 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19804
19805 void
19806 cons_fix_new_arm (fragS * frag,
19807 int where,
19808 int size,
19809 expressionS * exp)
19810 {
19811 bfd_reloc_code_real_type type;
19812 int pcrel = 0;
19813
19814 /* Pick a reloc.
19815 FIXME: @@ Should look at CPU word size. */
19816 switch (size)
19817 {
19818 case 1:
19819 type = BFD_RELOC_8;
19820 break;
19821 case 2:
19822 type = BFD_RELOC_16;
19823 break;
19824 case 4:
19825 default:
19826 type = BFD_RELOC_32;
19827 break;
19828 case 8:
19829 type = BFD_RELOC_64;
19830 break;
19831 }
19832
19833 #ifdef TE_PE
19834 if (exp->X_op == O_secrel)
19835 {
19836 exp->X_op = O_symbol;
19837 type = BFD_RELOC_32_SECREL;
19838 }
19839 #endif
19840
19841 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19842 }
19843
19844 #if defined OBJ_COFF || defined OBJ_ELF
19845 void
19846 arm_validate_fix (fixS * fixP)
19847 {
19848 /* If the destination of the branch is a defined symbol which does not have
19849 the THUMB_FUNC attribute, then we must be calling a function which has
19850 the (interfacearm) attribute. We look for the Thumb entry point to that
19851 function and change the branch to refer to that function instead. */
19852 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19853 && fixP->fx_addsy != NULL
19854 && S_IS_DEFINED (fixP->fx_addsy)
19855 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19856 {
19857 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19858 }
19859 }
19860 #endif
19861
19862 int
19863 arm_force_relocation (struct fix * fixp)
19864 {
19865 #if defined (OBJ_COFF) && defined (TE_PE)
19866 if (fixp->fx_r_type == BFD_RELOC_RVA)
19867 return 1;
19868 #endif
19869
19870 /* Resolve these relocations even if the symbol is extern or weak. */
19871 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19872 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19873 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19874 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19875 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19876 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19877 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19878 return 0;
19879
19880 /* Always leave these relocations for the linker. */
19881 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19882 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19883 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19884 return 1;
19885
19886 /* Always generate relocations against function symbols. */
19887 if (fixp->fx_r_type == BFD_RELOC_32
19888 && fixp->fx_addsy
19889 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19890 return 1;
19891
19892 return generic_force_reloc (fixp);
19893 }
19894
19895 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19896 /* Relocations against function names must be left unadjusted,
19897 so that the linker can use this information to generate interworking
19898 stubs. The MIPS version of this function
19899 also prevents relocations that are mips-16 specific, but I do not
19900 know why it does this.
19901
19902 FIXME:
19903 There is one other problem that ought to be addressed here, but
19904 which currently is not: Taking the address of a label (rather
19905 than a function) and then later jumping to that address. Such
19906 addresses also ought to have their bottom bit set (assuming that
19907 they reside in Thumb code), but at the moment they will not. */
19908
19909 bfd_boolean
19910 arm_fix_adjustable (fixS * fixP)
19911 {
19912 if (fixP->fx_addsy == NULL)
19913 return 1;
19914
19915 /* Preserve relocations against symbols with function type. */
19916 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19917 return 0;
19918
19919 if (THUMB_IS_FUNC (fixP->fx_addsy)
19920 && fixP->fx_subsy == NULL)
19921 return 0;
19922
19923 /* We need the symbol name for the VTABLE entries. */
19924 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19925 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19926 return 0;
19927
19928 /* Don't allow symbols to be discarded on GOT related relocs. */
19929 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19930 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19931 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19932 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19933 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19934 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19935 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19936 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19937 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19938 return 0;
19939
19940 /* Similarly for group relocations. */
19941 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19942 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19943 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19944 return 0;
19945
19946 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
19947 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
19948 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19949 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
19950 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
19951 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19952 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
19953 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
19954 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
19955 return 0;
19956
19957 return 1;
19958 }
19959 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19960
19961 #ifdef OBJ_ELF
19962
19963 const char *
19964 elf32_arm_target_format (void)
19965 {
19966 #ifdef TE_SYMBIAN
19967 return (target_big_endian
19968 ? "elf32-bigarm-symbian"
19969 : "elf32-littlearm-symbian");
19970 #elif defined (TE_VXWORKS)
19971 return (target_big_endian
19972 ? "elf32-bigarm-vxworks"
19973 : "elf32-littlearm-vxworks");
19974 #else
19975 if (target_big_endian)
19976 return "elf32-bigarm";
19977 else
19978 return "elf32-littlearm";
19979 #endif
19980 }
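
/* (The target name chosen above is the BFD target vector used for the output
   bfd; it is also what, for example, "objdump -f" reports as the file format
   of the resulting object, e.g. "elf32-littlearm".)  */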
19981
19982 void
19983 armelf_frob_symbol (symbolS * symp,
19984 int * puntp)
19985 {
19986 elf_frob_symbol (symp, puntp);
19987 }
19988 #endif
19989
19990 /* MD interface: Finalization. */
19991
19992 /* This is a convenient place to do this, although the md_cleanup hook was
19993    probably not intended for this kind of use.  We need to dump any pending
19994    literal pools before references are made to a NULL symbol pointer.  */
19995
19996 void
19997 arm_cleanup (void)
19998 {
19999 literal_pool * pool;
20000
20001 for (pool = list_of_pools; pool; pool = pool->next)
20002 {
20003 /* Put it at the end of the relevant section. */
20004 subseg_set (pool->section, pool->sub_section);
20005 #ifdef OBJ_ELF
20006 arm_elf_change_section ();
20007 #endif
20008 s_ltorg (0);
20009 }
20010 }
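
/* (For example, an "ldr r0, =0x12345678" pseudo-instruction whose constant
   cannot be encoded as a MOV or MVN immediate places that constant in the
   current literal pool; arm_cleanup flushes any pool that was never dumped
   by an explicit .ltorg or .pool directive.)  */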
20011
20012 /* Adjust the symbol table. This marks Thumb symbols as distinct from
20013 ARM ones. */
20014
20015 void
20016 arm_adjust_symtab (void)
20017 {
20018 #ifdef OBJ_COFF
20019 symbolS * sym;
20020
20021 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
20022 {
20023 if (ARM_IS_THUMB (sym))
20024 {
20025 if (THUMB_IS_FUNC (sym))
20026 {
20027 /* Mark the symbol as a Thumb function. */
20028 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
20029 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
20030 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
20031
20032 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
20033 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
20034 else
20035 as_bad (_("%s: unexpected function type: %d"),
20036 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
20037 }
20038 else switch (S_GET_STORAGE_CLASS (sym))
20039 {
20040 case C_EXT:
20041 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
20042 break;
20043 case C_STAT:
20044 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
20045 break;
20046 case C_LABEL:
20047 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
20048 break;
20049 default:
20050 /* Do nothing. */
20051 break;
20052 }
20053 }
20054
20055 if (ARM_IS_INTERWORK (sym))
20056 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
20057 }
20058 #endif
20059 #ifdef OBJ_ELF
20060 symbolS * sym;
20061 char bind;
20062
20063 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
20064 {
20065 if (ARM_IS_THUMB (sym))
20066 {
20067 elf_symbol_type * elf_sym;
20068
20069 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
20070 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
20071
20072 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
20073 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
20074 {
20075 	      /* If it's a .thumb_func, declare it as such;
20076 		 otherwise tag the label as .code 16.  */
20077 if (THUMB_IS_FUNC (sym))
20078 elf_sym->internal_elf_sym.st_info =
20079 ELF_ST_INFO (bind, STT_ARM_TFUNC);
20080 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20081 elf_sym->internal_elf_sym.st_info =
20082 ELF_ST_INFO (bind, STT_ARM_16BIT);
20083 }
20084 }
20085 }
20086 #endif
20087 }
20088
20089 /* MD interface: Initialization. */
20090
20091 static void
20092 set_constant_flonums (void)
20093 {
20094 int i;
20095
20096 for (i = 0; i < NUM_FLOAT_VALS; i++)
20097 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
20098 abort ();
20099 }
20100
20101 /* Auto-select Thumb mode if it's the only available instruction set for the
20102 given architecture. */
20103
20104 static void
20105 autoselect_thumb_from_cpu_variant (void)
20106 {
20107 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
20108 opcode_select (16);
20109 }
20110
20111 void
20112 md_begin (void)
20113 {
20114 unsigned mach;
20115 unsigned int i;
20116
20117 if ( (arm_ops_hsh = hash_new ()) == NULL
20118 || (arm_cond_hsh = hash_new ()) == NULL
20119 || (arm_shift_hsh = hash_new ()) == NULL
20120 || (arm_psr_hsh = hash_new ()) == NULL
20121 || (arm_v7m_psr_hsh = hash_new ()) == NULL
20122 || (arm_reg_hsh = hash_new ()) == NULL
20123 || (arm_reloc_hsh = hash_new ()) == NULL
20124 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
20125 as_fatal (_("virtual memory exhausted"));
20126
20127 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
20128 hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
20129 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
20130 hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
20131 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
20132 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
20133 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
20134 hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
20135 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
20136 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
20137 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
20138 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
20139 for (i = 0;
20140 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
20141 i++)
20142 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
20143 (void *) (barrier_opt_names + i));
20144 #ifdef OBJ_ELF
20145 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
20146 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
20147 #endif
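
  /* (These tables are consulted later via hash_find/hash_find_n while
     mnemonics and operands are parsed, so every mnemonic, condition code,
     register name, PSR field, barrier option and relocation name must be
     registered here before any assembly takes place.)  */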
20148
20149 set_constant_flonums ();
20150
20151 /* Set the cpu variant based on the command-line options. We prefer
20152 -mcpu= over -march= if both are set (as for GCC); and we prefer
20153 -mfpu= over any other way of setting the floating point unit.
20154    Mixing legacy and new-style options is reported as an error.  */
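
  /* For example, if both -march=armv4t and -mcpu=arm9e are given, the arm9e
     (ARMv5TE) feature set is used and the -march= value is silently ignored;
     only mixing a legacy option such as -m9 with the new-style options is
     diagnosed below.  */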
20155 if (legacy_cpu)
20156 {
20157 if (mcpu_cpu_opt || march_cpu_opt)
20158 as_bad (_("use of old and new-style options to set CPU type"));
20159
20160 mcpu_cpu_opt = legacy_cpu;
20161 }
20162 else if (!mcpu_cpu_opt)
20163 mcpu_cpu_opt = march_cpu_opt;
20164
20165 if (legacy_fpu)
20166 {
20167 if (mfpu_opt)
20168 as_bad (_("use of old and new-style options to set FPU type"));
20169
20170 mfpu_opt = legacy_fpu;
20171 }
20172 else if (!mfpu_opt)
20173 {
20174 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
20175 /* Some environments specify a default FPU. If they don't, infer it
20176 from the processor. */
20177 if (mcpu_fpu_opt)
20178 mfpu_opt = mcpu_fpu_opt;
20179 else
20180 mfpu_opt = march_fpu_opt;
20181 #else
20182 mfpu_opt = &fpu_default;
20183 #endif
20184 }
20185
20186 if (!mfpu_opt)
20187 {
20188 if (mcpu_cpu_opt != NULL)
20189 mfpu_opt = &fpu_default;
20190 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
20191 mfpu_opt = &fpu_arch_vfp_v2;
20192 else
20193 mfpu_opt = &fpu_arch_fpa;
20194 }
20195
20196 #ifdef CPU_DEFAULT
20197 if (!mcpu_cpu_opt)
20198 {
20199 mcpu_cpu_opt = &cpu_default;
20200 selected_cpu = cpu_default;
20201 }
20202 #else
20203 if (mcpu_cpu_opt)
20204 selected_cpu = *mcpu_cpu_opt;
20205 else
20206 mcpu_cpu_opt = &arm_arch_any;
20207 #endif
20208
20209 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20210
20211 autoselect_thumb_from_cpu_variant ();
20212
20213 arm_arch_used = thumb_arch_used = arm_arch_none;
20214
20215 #if defined OBJ_COFF || defined OBJ_ELF
20216 {
20217 unsigned int flags = 0;
20218
20219 #if defined OBJ_ELF
20220 flags = meabi_flags;
20221
20222 switch (meabi_flags)
20223 {
20224 case EF_ARM_EABI_UNKNOWN:
20225 #endif
20226 /* Set the flags in the private structure. */
20227 if (uses_apcs_26) flags |= F_APCS26;
20228 if (support_interwork) flags |= F_INTERWORK;
20229 if (uses_apcs_float) flags |= F_APCS_FLOAT;
20230 if (pic_code) flags |= F_PIC;
20231 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
20232 flags |= F_SOFT_FLOAT;
20233
20234 switch (mfloat_abi_opt)
20235 {
20236 case ARM_FLOAT_ABI_SOFT:
20237 case ARM_FLOAT_ABI_SOFTFP:
20238 flags |= F_SOFT_FLOAT;
20239 break;
20240
20241 case ARM_FLOAT_ABI_HARD:
20242 if (flags & F_SOFT_FLOAT)
20243 as_bad (_("hard-float conflicts with specified fpu"));
20244 break;
20245 }
20246
20247 /* Using pure-endian doubles (even if soft-float). */
20248 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
20249 flags |= F_VFP_FLOAT;
20250
20251 #if defined OBJ_ELF
20252 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
20253 flags |= EF_ARM_MAVERICK_FLOAT;
20254 break;
20255
20256 case EF_ARM_EABI_VER4:
20257 case EF_ARM_EABI_VER5:
20258 /* No additional flags to set. */
20259 break;
20260
20261 default:
20262 abort ();
20263 }
20264 #endif
20265 bfd_set_private_flags (stdoutput, flags);
20266
20267     /* We have run out of flags in the COFF header to encode the
20268 status of ATPCS support, so instead we create a dummy,
20269 empty, debug section called .arm.atpcs. */
20270 if (atpcs)
20271 {
20272 asection * sec;
20273
20274 sec = bfd_make_section (stdoutput, ".arm.atpcs");
20275
20276 if (sec != NULL)
20277 {
20278 bfd_set_section_flags
20279 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
20280 bfd_set_section_size (stdoutput, sec, 0);
20281 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
20282 }
20283 }
20284 }
20285 #endif
20286
20287 /* Record the CPU type as well. */
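  /* (The tests below are ordered from most to least capable, so the first
     match yields the most specific bfd_mach_arm_* value.)  */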
20288 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
20289 mach = bfd_mach_arm_iWMMXt2;
20290 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
20291 mach = bfd_mach_arm_iWMMXt;
20292 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
20293 mach = bfd_mach_arm_XScale;
20294 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
20295 mach = bfd_mach_arm_ep9312;
20296 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
20297 mach = bfd_mach_arm_5TE;
20298 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
20299 {
20300 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
20301 mach = bfd_mach_arm_5T;
20302 else
20303 mach = bfd_mach_arm_5;
20304 }
20305 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
20306 {
20307 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
20308 mach = bfd_mach_arm_4T;
20309 else
20310 mach = bfd_mach_arm_4;
20311 }
20312 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
20313 mach = bfd_mach_arm_3M;
20314 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
20315 mach = bfd_mach_arm_3;
20316 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
20317 mach = bfd_mach_arm_2a;
20318 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
20319 mach = bfd_mach_arm_2;
20320 else
20321 mach = bfd_mach_arm_unknown;
20322
20323 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
20324 }
20325
20326 /* Command line processing. */
20327
20328 /* md_parse_option
20329 Invocation line includes a switch not recognized by the base assembler.
20330 See if it's a processor-specific option.
20331
20332 This routine is somewhat complicated by the need for backwards
20333 compatibility (since older releases of gcc can't be changed).
20334 The new options try to make the interface as compatible as
20335 possible with GCC.
20336
20337 New options (supported) are:
20338
20339 -mcpu=<cpu name> Assemble for selected processor
20340 -march=<architecture name> Assemble for selected architecture
20341 -mfpu=<fpu architecture> Assemble for selected FPU.
20342 -EB/-mbig-endian Big-endian
20343 -EL/-mlittle-endian Little-endian
20344 -k Generate PIC code
20345 -mthumb Start in Thumb mode
20346 -mthumb-interwork Code supports ARM/Thumb interworking
20347
20348 -m[no-]warn-deprecated Warn about deprecated features
20349
20350 For now we will also provide support for:
20351
20352 -mapcs-32 32-bit Program counter
20353 -mapcs-26 26-bit Program counter
20354      -mapcs-float		  Floats passed in FP registers
20355 -mapcs-reentrant Reentrant code
20356 -matpcs
20357      (at some point these will probably be replaced with -mapcs=<list of options>
20358 and -matpcs=<list of options>)
20359
20360      The remaining options are only supported for backwards compatibility.
20361      CPU variants (the "arm" part is optional):
20362 -m[arm]1 Currently not supported.
20363 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
20364 -m[arm]3 Arm 3 processor
20365 -m[arm]6[xx], Arm 6 processors
20366 -m[arm]7[xx][t][[d]m] Arm 7 processors
20367 -m[arm]8[10] Arm 8 processors
20368 -m[arm]9[20][tdmi] Arm 9 processors
20369 -mstrongarm[110[0]] StrongARM processors
20370 -mxscale XScale processors
20371 -m[arm]v[2345[t[e]]] Arm architectures
20372 -mall All (except the ARM1)
20373 FP variants:
20374 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
20375 -mfpe-old (No float load/store multiples)
20376 -mvfpxd VFP Single precision
20377 -mvfp All VFP
20378 -mno-fpu Disable all floating point instructions
20379
20380 The following CPU names are recognized:
20381 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
20382 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
20383      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
20384 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
20385 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
20386      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
20387 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
20388
20389 */
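
/* For instance (an illustrative command line, not an exhaustive one):

       as -mcpu=arm7tdmi -mthumb -mthumb-interwork -EL -o test.o test.s

   selects the ARM7TDMI feature set, starts assembling in Thumb mode, marks
   the code as interworking-capable and (on a bi-endian target) produces
   little-endian output.  The legacy spelling -m7tdmi is still accepted but
   reported as deprecated (see arm_legacy_opts below).  */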
20390
20391 const char * md_shortopts = "m:k";
20392
20393 #ifdef ARM_BI_ENDIAN
20394 #define OPTION_EB (OPTION_MD_BASE + 0)
20395 #define OPTION_EL (OPTION_MD_BASE + 1)
20396 #else
20397 #if TARGET_BYTES_BIG_ENDIAN
20398 #define OPTION_EB (OPTION_MD_BASE + 0)
20399 #else
20400 #define OPTION_EL (OPTION_MD_BASE + 1)
20401 #endif
20402 #endif
20403 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
20404
20405 struct option md_longopts[] =
20406 {
20407 #ifdef OPTION_EB
20408 {"EB", no_argument, NULL, OPTION_EB},
20409 #endif
20410 #ifdef OPTION_EL
20411 {"EL", no_argument, NULL, OPTION_EL},
20412 #endif
20413 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
20414 {NULL, no_argument, NULL, 0}
20415 };
20416
20417 size_t md_longopts_size = sizeof (md_longopts);
20418
20419 struct arm_option_table
20420 {
20421 char *option; /* Option name to match. */
20422 char *help; /* Help information. */
20423 int *var; /* Variable to change. */
20424 int value; /* What to change it to. */
20425 char *deprecated; /* If non-null, print this message. */
20426 };
20427
20428 struct arm_option_table arm_opts[] =
20429 {
20430 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
20431 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
20432 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
20433 &support_interwork, 1, NULL},
20434 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
20435 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
20436 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
20437 1, NULL},
20438 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
20439 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
20440 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
20441 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
20442 NULL},
20443
20444   /* These are recognized by the assembler, but have no effect on code.  */
20445 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
20446 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
20447
20448 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
20449 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
20450 &warn_on_deprecated, 0, NULL},
20451 {NULL, NULL, NULL, 0, NULL}
20452 };
20453
20454 struct arm_legacy_option_table
20455 {
20456 char *option; /* Option name to match. */
20457 const arm_feature_set **var; /* Variable to change. */
20458 const arm_feature_set value; /* What to change it to. */
20459 char *deprecated; /* If non-null, print this message. */
20460 };
20461
20462 const struct arm_legacy_option_table arm_legacy_opts[] =
20463 {
20464 /* DON'T add any new processors to this list -- we want the whole list
20465 to go away... Add them to the processors table instead. */
20466 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20467 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20468 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20469 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20470 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20471 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20472 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20473 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20474 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20475 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20476 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20477 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20478 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20479 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20480 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20481 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20482 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20483 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20484 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20485 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20486 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20487 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20488 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20489 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20490 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20491 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20492 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20493 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20494 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20495 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20496 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20497 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20498 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20499 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20500 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20501 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20502 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20503 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20504 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20505 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20506 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20507 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20508 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20509 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20510 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20511 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20512 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20513 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20514 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20515 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20516 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20517 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20518 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20519 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20520 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20521 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20522 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20523 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20524 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20525 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20526 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20527 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20528 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20529 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20530 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20531 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20532 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20533 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20534 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
20535 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
20536 N_("use -mcpu=strongarm110")},
20537 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
20538 N_("use -mcpu=strongarm1100")},
20539 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
20540 N_("use -mcpu=strongarm1110")},
20541 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
20542 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
20543 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
20544
20545 /* Architecture variants -- don't add any more to this list either. */
20546 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20547 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20548 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20549 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20550 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20551 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20552 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20553 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20554 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20555 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20556 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20557 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20558 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20559 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20560 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20561 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20562 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20563 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20564
20565 /* Floating point variants -- don't add any more to this list either. */
20566 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
20567 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
20568 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
20569 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
20570 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
20571
20572 {NULL, NULL, ARM_ARCH_NONE, NULL}
20573 };
20574
20575 struct arm_cpu_option_table
20576 {
20577 char *name;
20578 const arm_feature_set value;
20579 /* For some CPUs we assume an FPU unless the user explicitly sets
20580 -mfpu=... */
20581 const arm_feature_set default_fpu;
20582 /* The canonical name of the CPU, or NULL to use NAME converted to upper
20583 case. */
20584 const char *canonical_name;
20585 };
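
/* For instance, the "arm920" entry below supplies the canonical name
   "ARM920T" explicitly, because simply upper-casing the table name would
   give "ARM920"; most entries leave this field NULL.  */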
20586
20587 /* This list should, at a minimum, contain all the cpu names
20588 recognized by GCC. */
20589 static const struct arm_cpu_option_table arm_cpus[] =
20590 {
20591 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
20592 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
20593 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
20594 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20595 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20596 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20597 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20598 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20599 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20600 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20601 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20602 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20603 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20604 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20605 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20606 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20607 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20608 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20609 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20610 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20611 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20612 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20613 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20614 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20615 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20616 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20617 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20618 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20619 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20620 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20621 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20622 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20623 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20624 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20625 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20626 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20627 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20628 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20629 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20630 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
20631 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20632 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20633 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20634 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20635 {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20636 {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20637 /* For V5 or later processors we default to using VFP; but the user
20638 should really set the FPU type explicitly. */
20639 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20640 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20641 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20642 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20643 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20644 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20645 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
20646 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20647 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20648 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
20649 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20650 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20651 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20652 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20653 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20654 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
20655 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20656 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20657 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20658 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
20659 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20660 {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
20661 {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20662 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
20663 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
20664 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
20665 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
20666 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
20667 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
20668 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
20669 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
20670 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
20671 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
20672 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20673 | FPU_NEON_EXT_V1),
20674 NULL},
20675 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20676 | FPU_NEON_EXT_V1),
20677 NULL},
20678 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
20679 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
20680 {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
20681 /* ??? XSCALE is really an architecture. */
20682 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20683 /* ??? iwmmxt is not a processor. */
20684 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
20685 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
20686 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20687 /* Maverick */
20688 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
20689 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
20690 };
20691
20692 struct arm_arch_option_table
20693 {
20694 char *name;
20695 const arm_feature_set value;
20696 const arm_feature_set default_fpu;
20697 };
20698
20699 /* This list should, at a minimum, contain all the architecture names
20700 recognized by GCC. */
20701 static const struct arm_arch_option_table arm_archs[] =
20702 {
20703 {"all", ARM_ANY, FPU_ARCH_FPA},
20704 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
20705 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
20706 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
20707 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
20708 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
20709 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
20710 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
20711 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
20712 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
20713 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
20714 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
20715 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
20716 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
20717 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
20718 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
20719 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
20720 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
20721 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
20722 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
20723 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
20724 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
20725 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
20726 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
20727 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
20728 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
20729 {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
20730 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
20731 /* The official spelling of the ARMv7 profile variants is the dashed form.
20732 Accept the non-dashed form for compatibility with old toolchains. */
20733 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20734 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20735 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20736 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20737 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20738 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20739 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
20740 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
20741 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
20742 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
20743 };
20744
20745 /* ISA extensions in the co-processor space. */
20746 struct arm_option_cpu_value_table
20747 {
20748 char *name;
20749 const arm_feature_set value;
20750 };
20751
20752 static const struct arm_option_cpu_value_table arm_extensions[] =
20753 {
20754 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20755 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20756 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20757 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20758 {NULL, ARM_ARCH_NONE}
20759 };
20760
20761 /* This list should, at a minimum, contain all the fpu names
20762 recognized by GCC. */
20763 static const struct arm_option_cpu_value_table arm_fpus[] =
20764 {
20765 {"softfpa", FPU_NONE},
20766 {"fpe", FPU_ARCH_FPE},
20767 {"fpe2", FPU_ARCH_FPE},
20768 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20769 {"fpa", FPU_ARCH_FPA},
20770 {"fpa10", FPU_ARCH_FPA},
20771 {"fpa11", FPU_ARCH_FPA},
20772 {"arm7500fe", FPU_ARCH_FPA},
20773 {"softvfp", FPU_ARCH_VFP},
20774 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20775 {"vfp", FPU_ARCH_VFP_V2},
20776 {"vfp9", FPU_ARCH_VFP_V2},
20777   {"vfp3",		FPU_ARCH_VFP_V3},	/* For backwards compatibility.  */
20778 {"vfp10", FPU_ARCH_VFP_V2},
20779 {"vfp10-r0", FPU_ARCH_VFP_V1},
20780 {"vfpxd", FPU_ARCH_VFP_V1xD},
20781 {"vfpv2", FPU_ARCH_VFP_V2},
20782 {"vfpv3", FPU_ARCH_VFP_V3},
20783 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
20784 {"arm1020t", FPU_ARCH_VFP_V1},
20785 {"arm1020e", FPU_ARCH_VFP_V2},
20786 {"arm1136jfs", FPU_ARCH_VFP_V2},
20787 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20788 {"maverick", FPU_ARCH_MAVERICK},
20789 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20790 {"neon-fp16", FPU_ARCH_NEON_FP16},
20791 {NULL, ARM_ARCH_NONE}
20792 };
20793
20794 struct arm_option_value_table
20795 {
20796 char *name;
20797 long value;
20798 };
20799
20800 static const struct arm_option_value_table arm_float_abis[] =
20801 {
20802 {"hard", ARM_FLOAT_ABI_HARD},
20803 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20804 {"soft", ARM_FLOAT_ABI_SOFT},
20805 {NULL, 0}
20806 };
20807
20808 #ifdef OBJ_ELF
20809 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20810 static const struct arm_option_value_table arm_eabis[] =
20811 {
20812 {"gnu", EF_ARM_EABI_UNKNOWN},
20813 {"4", EF_ARM_EABI_VER4},
20814 {"5", EF_ARM_EABI_VER5},
20815 {NULL, 0}
20816 };
20817 #endif
20818
20819 struct arm_long_option_table
20820 {
20821 char * option; /* Substring to match. */
20822 char * help; /* Help information. */
20823 int (* func) (char * subopt); /* Function to decode sub-option. */
20824 char * deprecated; /* If non-null, print this message. */
20825 };
20826
20827 static int
20828 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20829 {
20830 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20831
20832 /* Copy the feature set, so that we can modify it. */
20833 *ext_set = **opt_p;
20834 *opt_p = ext_set;
20835
20836 while (str != NULL && *str != 0)
20837 {
20838 const struct arm_option_cpu_value_table * opt;
20839 char * ext;
20840 int optlen;
20841
20842 if (*str != '+')
20843 {
20844 as_bad (_("invalid architectural extension"));
20845 return 0;
20846 }
20847
20848 str++;
20849 ext = strchr (str, '+');
20850
20851 if (ext != NULL)
20852 optlen = ext - str;
20853 else
20854 optlen = strlen (str);
20855
20856 if (optlen == 0)
20857 {
20858 as_bad (_("missing architectural extension"));
20859 return 0;
20860 }
20861
20862 for (opt = arm_extensions; opt->name != NULL; opt++)
20863 if (strncmp (opt->name, str, optlen) == 0)
20864 {
20865 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20866 break;
20867 }
20868
20869 if (opt->name == NULL)
20870 {
20871 as_bad (_("unknown architectural extension `%s'"), str);
20872 return 0;
20873 }
20874
20875 str = ext;
20876     }
20877
20878 return 1;
20879 }
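
/* For example, "-mcpu=xscale+iwmmxt" reaches arm_parse_extension with
   STR == "+iwmmxt"; the iWMMXt coprocessor features are then merged into a
   heap-allocated copy of the XScale feature set, leaving the original table
   entry untouched.  */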
20880
20881 static int
20882 arm_parse_cpu (char * str)
20883 {
20884 const struct arm_cpu_option_table * opt;
20885 char * ext = strchr (str, '+');
20886 int optlen;
20887
20888 if (ext != NULL)
20889 optlen = ext - str;
20890 else
20891 optlen = strlen (str);
20892
20893 if (optlen == 0)
20894 {
20895 as_bad (_("missing cpu name `%s'"), str);
20896 return 0;
20897 }
20898
20899 for (opt = arm_cpus; opt->name != NULL; opt++)
20900 if (strncmp (opt->name, str, optlen) == 0)
20901 {
20902 mcpu_cpu_opt = &opt->value;
20903 mcpu_fpu_opt = &opt->default_fpu;
20904 if (opt->canonical_name)
20905 strcpy (selected_cpu_name, opt->canonical_name);
20906 else
20907 {
20908 int i;
20909 for (i = 0; i < optlen; i++)
20910 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20911 selected_cpu_name[i] = 0;
20912 }
20913
20914 if (ext != NULL)
20915 return arm_parse_extension (ext, &mcpu_cpu_opt);
20916
20917 return 1;
20918 }
20919
20920 as_bad (_("unknown cpu `%s'"), str);
20921 return 0;
20922 }
20923
20924 static int
20925 arm_parse_arch (char * str)
20926 {
20927 const struct arm_arch_option_table *opt;
20928 char *ext = strchr (str, '+');
20929 int optlen;
20930
20931 if (ext != NULL)
20932 optlen = ext - str;
20933 else
20934 optlen = strlen (str);
20935
20936 if (optlen == 0)
20937 {
20938 as_bad (_("missing architecture name `%s'"), str);
20939 return 0;
20940 }
20941
20942 for (opt = arm_archs; opt->name != NULL; opt++)
20943 if (streq (opt->name, str))
20944 {
20945 march_cpu_opt = &opt->value;
20946 march_fpu_opt = &opt->default_fpu;
20947 strcpy (selected_cpu_name, opt->name);
20948
20949 if (ext != NULL)
20950 return arm_parse_extension (ext, &march_cpu_opt);
20951
20952 return 1;
20953 }
20954
20955 as_bad (_("unknown architecture `%s'\n"), str);
20956 return 0;
20957 }
20958
20959 static int
20960 arm_parse_fpu (char * str)
20961 {
20962 const struct arm_option_cpu_value_table * opt;
20963
20964 for (opt = arm_fpus; opt->name != NULL; opt++)
20965 if (streq (opt->name, str))
20966 {
20967 mfpu_opt = &opt->value;
20968 return 1;
20969 }
20970
20971 as_bad (_("unknown floating point format `%s'\n"), str);
20972 return 0;
20973 }
20974
20975 static int
20976 arm_parse_float_abi (char * str)
20977 {
20978 const struct arm_option_value_table * opt;
20979
20980 for (opt = arm_float_abis; opt->name != NULL; opt++)
20981 if (streq (opt->name, str))
20982 {
20983 mfloat_abi_opt = opt->value;
20984 return 1;
20985 }
20986
20987 as_bad (_("unknown floating point abi `%s'\n"), str);
20988 return 0;
20989 }
20990
20991 #ifdef OBJ_ELF
20992 static int
20993 arm_parse_eabi (char * str)
20994 {
20995 const struct arm_option_value_table *opt;
20996
20997 for (opt = arm_eabis; opt->name != NULL; opt++)
20998 if (streq (opt->name, str))
20999 {
21000 meabi_flags = opt->value;
21001 return 1;
21002 }
21003 as_bad (_("unknown EABI `%s'\n"), str);
21004 return 0;
21005 }
21006 #endif
21007
21008 struct arm_long_option_table arm_long_opts[] =
21009 {
21010 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
21011 arm_parse_cpu, NULL},
21012 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
21013 arm_parse_arch, NULL},
21014 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
21015 arm_parse_fpu, NULL},
21016 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
21017 arm_parse_float_abi, NULL},
21018 #ifdef OBJ_ELF
21019 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
21020 arm_parse_eabi, NULL},
21021 #endif
21022 {NULL, NULL, 0, NULL}
21023 };
21024
21025 int
21026 md_parse_option (int c, char * arg)
21027 {
21028 struct arm_option_table *opt;
21029 const struct arm_legacy_option_table *fopt;
21030 struct arm_long_option_table *lopt;
21031
21032 switch (c)
21033 {
21034 #ifdef OPTION_EB
21035 case OPTION_EB:
21036 target_big_endian = 1;
21037 break;
21038 #endif
21039
21040 #ifdef OPTION_EL
21041 case OPTION_EL:
21042 target_big_endian = 0;
21043 break;
21044 #endif
21045
21046 case OPTION_FIX_V4BX:
21047 fix_v4bx = TRUE;
21048 break;
21049
21050 case 'a':
21051     /* Listing option.  Just ignore these; we don't support additional
21052 ones. */
21053 return 0;
21054
21055 default:
21056 for (opt = arm_opts; opt->option != NULL; opt++)
21057 {
21058 if (c == opt->option[0]
21059 && ((arg == NULL && opt->option[1] == 0)
21060 || streq (arg, opt->option + 1)))
21061 {
21062 /* If the option is deprecated, tell the user. */
21063 if (warn_on_deprecated && opt->deprecated != NULL)
21064 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
21065 arg ? arg : "", _(opt->deprecated));
21066
21067 if (opt->var != NULL)
21068 *opt->var = opt->value;
21069
21070 return 1;
21071 }
21072 }
21073
21074 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
21075 {
21076 if (c == fopt->option[0]
21077 && ((arg == NULL && fopt->option[1] == 0)
21078 || streq (arg, fopt->option + 1)))
21079 {
21080 /* If the option is deprecated, tell the user. */
21081 if (warn_on_deprecated && fopt->deprecated != NULL)
21082 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
21083 arg ? arg : "", _(fopt->deprecated));
21084
21085 if (fopt->var != NULL)
21086 *fopt->var = &fopt->value;
21087
21088 return 1;
21089 }
21090 }
21091
21092 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
21093 {
21094 /* These options are expected to have an argument. */
21095 if (c == lopt->option[0]
21096 && arg != NULL
21097 && strncmp (arg, lopt->option + 1,
21098 strlen (lopt->option + 1)) == 0)
21099 {
21100 /* If the option is deprecated, tell the user. */
21101 if (warn_on_deprecated && lopt->deprecated != NULL)
21102 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
21103 _(lopt->deprecated));
21104
21105 	      /* Call the sub-option parser.  */
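	      /* (ARG does not include the leading "m" that lopt->option has,
		 so skipping strlen (lopt->option) - 1 characters leaves it
		 pointing just past the "=".)  */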
21106 return lopt->func (arg + strlen (lopt->option) - 1);
21107 }
21108 }
21109
21110 return 0;
21111 }
21112
21113 return 1;
21114 }
21115
21116 void
21117 md_show_usage (FILE * fp)
21118 {
21119 struct arm_option_table *opt;
21120 struct arm_long_option_table *lopt;
21121
21122 fprintf (fp, _(" ARM-specific assembler options:\n"));
21123
21124 for (opt = arm_opts; opt->option != NULL; opt++)
21125 if (opt->help != NULL)
21126 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
21127
21128 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
21129 if (lopt->help != NULL)
21130 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
21131
21132 #ifdef OPTION_EB
21133 fprintf (fp, _("\
21134 -EB assemble code for a big-endian cpu\n"));
21135 #endif
21136
21137 #ifdef OPTION_EL
21138 fprintf (fp, _("\
21139 -EL assemble code for a little-endian cpu\n"));
21140 #endif
21141
21142 fprintf (fp, _("\
21143 --fix-v4bx Allow BX in ARMv4 code\n"));
21144 }
21145
21146
21147 #ifdef OBJ_ELF
21148 typedef struct
21149 {
21150 int val;
21151 arm_feature_set flags;
21152 } cpu_arch_ver_table;
21153
21154 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
21155    with the fewest features first.  */
21156 static const cpu_arch_ver_table cpu_arch_ver[] =
21157 {
21158 {1, ARM_ARCH_V4},
21159 {2, ARM_ARCH_V4T},
21160 {3, ARM_ARCH_V5},
21161 {3, ARM_ARCH_V5T},
21162 {4, ARM_ARCH_V5TE},
21163 {5, ARM_ARCH_V5TEJ},
21164 {6, ARM_ARCH_V6},
21165 {7, ARM_ARCH_V6Z},
21166 {9, ARM_ARCH_V6K},
21167 {11, ARM_ARCH_V6M},
21168 {8, ARM_ARCH_V6T2},
21169 {10, ARM_ARCH_V7A},
21170 {10, ARM_ARCH_V7R},
21171 {10, ARM_ARCH_V7M},
21172 {0, ARM_ARCH_NONE}
21173 };
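
/* (aeabi_set_public_attributes scans this table in order and keeps the value
   of the last entry that still matches, clearing matched features as it goes,
   so the ordering matters; note that the EABI values themselves are not
   monotonic, e.g. 8 (v6T2) follows 11 (v6M).)  */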
21174
21175 /* Set an attribute if it has not already been set by the user. */
21176 static void
21177 aeabi_set_attribute_int (int tag, int value)
21178 {
21179 if (tag < 1
21180 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
21181 || !attributes_set_explicitly[tag])
21182 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
21183 }
21184
21185 static void
21186 aeabi_set_attribute_string (int tag, const char *value)
21187 {
21188 if (tag < 1
21189 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
21190 || !attributes_set_explicitly[tag])
21191 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
21192 }
21193
21194 /* Set the public EABI object attributes. */
21195 static void
21196 aeabi_set_public_attributes (void)
21197 {
21198 int arch;
21199 arm_feature_set flags;
21200 arm_feature_set tmp;
21201 const cpu_arch_ver_table *p;
21202
21203 /* Choose the architecture based on the capabilities of the requested cpu
21204 (if any) and/or the instructions actually used. */
21205 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
21206 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
21207 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
21208   /* Allow the user to override the reported architecture.  */
21209 if (object_arch)
21210 {
21211 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
21212 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
21213 }
21214
21215 tmp = flags;
21216 arch = 0;
21217 for (p = cpu_arch_ver; p->val; p++)
21218 {
21219 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
21220 {
21221 arch = p->val;
21222 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
21223 }
21224 }
21225
21226 /* Tag_CPU_name. */
21227 if (selected_cpu_name[0])
21228 {
21229 char *p;
21230
21231 p = selected_cpu_name;
21232 if (strncmp (p, "armv", 4) == 0)
21233 {
21234 int i;
21235
21236 p += 4;
21237 for (i = 0; p[i]; i++)
21238 p[i] = TOUPPER (p[i]);
21239 }
21240 aeabi_set_attribute_string (Tag_CPU_name, p);
21241 }
21242 /* Tag_CPU_arch. */
21243 aeabi_set_attribute_int (Tag_CPU_arch, arch);
21244 /* Tag_CPU_arch_profile. */
21245 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
21246 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
21247 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
21248 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
21249 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
21250 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
21251 /* Tag_ARM_ISA_use. */
21252 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
21253 || arch == 0)
21254 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
21255 /* Tag_THUMB_ISA_use. */
21256 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
21257 || arch == 0)
21258 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
21259 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
21260 /* Tag_VFP_arch. */
21261 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
21262 aeabi_set_attribute_int (Tag_VFP_arch, 3);
21263 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
21264 aeabi_set_attribute_int (Tag_VFP_arch, 4);
21265 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
21266 aeabi_set_attribute_int (Tag_VFP_arch, 2);
21267 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
21268 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
21269 aeabi_set_attribute_int (Tag_VFP_arch, 1);
21270 /* Tag_WMMX_arch. */
21271 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
21272 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
21273 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
21274 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
21275 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
21276 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
21277 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
21278 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
21279 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
21280 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
21281 }
21282
21283 /* Add the default contents for the .ARM.attributes section. */
21284 void
21285 arm_md_end (void)
21286 {
21287 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
21288 return;
21289
21290 aeabi_set_public_attributes ();
21291 }
21292 #endif /* OBJ_ELF */
21293
21294
21295 /* Parse a .cpu directive. */
21296
21297 static void
21298 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
21299 {
21300 const struct arm_cpu_option_table *opt;
21301 char *name;
21302 char saved_char;
21303
21304 name = input_line_pointer;
21305 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21306 input_line_pointer++;
21307 saved_char = *input_line_pointer;
21308 *input_line_pointer = 0;
21309
21310 /* Skip the first "all" entry. */
21311 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
21312 if (streq (opt->name, name))
21313 {
21314 mcpu_cpu_opt = &opt->value;
21315 selected_cpu = opt->value;
21316 if (opt->canonical_name)
21317 strcpy (selected_cpu_name, opt->canonical_name);
21318 else
21319 {
21320 int i;
21321 for (i = 0; opt->name[i]; i++)
21322 selected_cpu_name[i] = TOUPPER (opt->name[i]);
21323 selected_cpu_name[i] = 0;
21324 }
21325 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21326 *input_line_pointer = saved_char;
21327 demand_empty_rest_of_line ();
21328 return;
21329 }
21330 as_bad (_("unknown cpu `%s'"), name);
21331 *input_line_pointer = saved_char;
21332 ignore_rest_of_line ();
21333 }
21334
21335
21336 /* Parse a .arch directive. */
21337
21338 static void
21339 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
21340 {
21341 const struct arm_arch_option_table *opt;
21342 char saved_char;
21343 char *name;
21344
21345 name = input_line_pointer;
21346 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21347 input_line_pointer++;
21348 saved_char = *input_line_pointer;
21349 *input_line_pointer = 0;
21350
21351 /* Skip the first "all" entry. */
21352 for (opt = arm_archs + 1; opt->name != NULL; opt++)
21353 if (streq (opt->name, name))
21354 {
21355 mcpu_cpu_opt = &opt->value;
21356 selected_cpu = opt->value;
21357 strcpy (selected_cpu_name, opt->name);
21358 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21359 *input_line_pointer = saved_char;
21360 demand_empty_rest_of_line ();
21361 return;
21362 }
21363
21364 as_bad (_("unknown architecture `%s'\n"), name);
21365 *input_line_pointer = saved_char;
21366 ignore_rest_of_line ();
21367 }
21368
21369
21370 /* Parse a .object_arch directive. */
21371
21372 static void
21373 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
21374 {
21375 const struct arm_arch_option_table *opt;
21376 char saved_char;
21377 char *name;
21378
21379 name = input_line_pointer;
21380 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21381 input_line_pointer++;
21382 saved_char = *input_line_pointer;
21383 *input_line_pointer = 0;
21384
21385 /* Skip the first "all" entry. */
21386 for (opt = arm_archs + 1; opt->name != NULL; opt++)
21387 if (streq (opt->name, name))
21388 {
21389 object_arch = &opt->value;
21390 *input_line_pointer = saved_char;
21391 demand_empty_rest_of_line ();
21392 return;
21393 }
21394
21395 as_bad (_("unknown architecture `%s'\n"), name);
21396 *input_line_pointer = saved_char;
21397 ignore_rest_of_line ();
21398 }
21399
21400 /* Parse a .fpu directive. */
21401
21402 static void
21403 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
21404 {
21405 const struct arm_option_cpu_value_table *opt;
21406 char saved_char;
21407 char *name;
21408
21409 name = input_line_pointer;
21410 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21411 input_line_pointer++;
21412 saved_char = *input_line_pointer;
21413 *input_line_pointer = 0;
21414
21415 for (opt = arm_fpus; opt->name != NULL; opt++)
21416 if (streq (opt->name, name))
21417 {
21418 mfpu_opt = &opt->value;
21419 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21420 *input_line_pointer = saved_char;
21421 demand_empty_rest_of_line ();
21422 return;
21423 }
21424
21425 as_bad (_("unknown floating point format `%s'\n"), name);
21426 *input_line_pointer = saved_char;
21427 ignore_rest_of_line ();
21428 }
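
/* Illustrative directive usage (names taken from the tables earlier in this
   file):

       .cpu arm926ej-s		@ like -mcpu=arm926ej-s
       .arch armv5te		@ like -march=armv5te
       .fpu vfpv2		@ like -mfpu=vfpv2
       .object_arch armv4t	@ only overrides the architecture recorded
				@ in the EABI attributes

   .cpu, .arch and .fpu take effect for the instructions that follow them;
   .object_arch only changes what is reported in the object's attributes.  */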
21429
21430 /* Copy symbol information. */
21431
21432 void
21433 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
21434 {
21435 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
21436 }
21437
21438 #ifdef OBJ_ELF
21439 /* Given a symbolic attribute NAME, return the proper integer value.
21440 Returns -1 if the attribute is not known. */
21441
21442 int
21443 arm_convert_symbolic_attribute (const char *name)
21444 {
21445 static const struct
21446 {
21447 const char * name;
21448 const int tag;
21449 }
21450 attribute_table[] =
21451 {
21452 /* When you modify this table you should
21453 also modify the list in doc/c-arm.texi. */
21454 #define T(tag) {#tag, tag}
21455 T (Tag_CPU_raw_name),
21456 T (Tag_CPU_name),
21457 T (Tag_CPU_arch),
21458 T (Tag_CPU_arch_profile),
21459 T (Tag_ARM_ISA_use),
21460 T (Tag_THUMB_ISA_use),
21461 T (Tag_VFP_arch),
21462 T (Tag_WMMX_arch),
21463 T (Tag_Advanced_SIMD_arch),
21464 T (Tag_PCS_config),
21465 T (Tag_ABI_PCS_R9_use),
21466 T (Tag_ABI_PCS_RW_data),
21467 T (Tag_ABI_PCS_RO_data),
21468 T (Tag_ABI_PCS_GOT_use),
21469 T (Tag_ABI_PCS_wchar_t),
21470 T (Tag_ABI_FP_rounding),
21471 T (Tag_ABI_FP_denormal),
21472 T (Tag_ABI_FP_exceptions),
21473 T (Tag_ABI_FP_user_exceptions),
21474 T (Tag_ABI_FP_number_model),
21475 T (Tag_ABI_align8_needed),
21476 T (Tag_ABI_align8_preserved),
21477 T (Tag_ABI_enum_size),
21478 T (Tag_ABI_HardFP_use),
21479 T (Tag_ABI_VFP_args),
21480 T (Tag_ABI_WMMX_args),
21481 T (Tag_ABI_optimization_goals),
21482 T (Tag_ABI_FP_optimization_goals),
21483 T (Tag_compatibility),
21484 T (Tag_CPU_unaligned_access),
21485 T (Tag_VFP_HP_extension),
21486 T (Tag_ABI_FP_16bit_format),
21487 T (Tag_nodefaults),
21488 T (Tag_also_compatible_with),
21489 T (Tag_conformance),
21490 T (Tag_T2EE_use),
21491 T (Tag_Virtualization_use),
21492 T (Tag_MPextension_use)
21493 #undef T
21494 };
21495 unsigned int i;
21496
21497 if (name == NULL)
21498 return -1;
21499
21500 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
21501 if (strcmp (name, attribute_table[i].name) == 0)
21502 return attribute_table[i].tag;
21503
21504 return -1;
21505 }
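
/* This allows a source file to write, for example,

       .eabi_attribute Tag_ABI_enum_size, 2

   instead of spelling out the numeric tag value.  */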
21506 #endif /* OBJ_ELF */