* config/tc-arm.c (make_mapping_symbol): Handle the case
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
51 static struct
52 {
53 symbolS * proc_start;
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
79
80 #endif /* OBJ_ELF */
81
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* NOTE(review): name suggests a hard failure after which no
     alternative parse should be attempted — confirm against callers.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
90
/* Floating point ABI variants (recorded in mfloat_abi_opt below).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
97
98 /* Types of processor to assemble for. */
99 #ifndef CPU_DEFAULT
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
103
 104    If you have a target that requires a default CPU option then you
105 should define CPU_DEFAULT here. */
106 #endif
107
108 #ifndef FPU_DEFAULT
109 # ifdef TE_LINUX
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
112 # ifdef OBJ_ELF
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
114 # else
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
117 # endif
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
120 # else
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
123 # endif
124 #endif /* ifndef FPU_DEFAULT */
125
126 #define streq(a, b) (strcmp (a, b) == 0)
127
128 static arm_feature_set cpu_variant;
129 static arm_feature_set arm_arch_used;
130 static arm_feature_set thumb_arch_used;
131
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26 = FALSE;
134 static int atpcs = FALSE;
135 static int support_interwork = FALSE;
136 static int uses_apcs_float = FALSE;
137 static int pic_code = FALSE;
138 static int fix_v4bx = FALSE;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated = TRUE;
141
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
188 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_m =
199 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
200
201 static const arm_feature_set arm_arch_any = ARM_ANY;
202 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
203 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
204 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
205
206 static const arm_feature_set arm_cext_iwmmxt2 =
207 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
208 static const arm_feature_set arm_cext_iwmmxt =
209 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
210 static const arm_feature_set arm_cext_xscale =
211 ARM_FEATURE (0, ARM_CEXT_XSCALE);
212 static const arm_feature_set arm_cext_maverick =
213 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
214 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
215 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
216 static const arm_feature_set fpu_vfp_ext_v1xd =
217 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
218 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
219 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
221 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
222 static const arm_feature_set fpu_vfp_ext_d32 =
223 ARM_FEATURE (0, FPU_VFP_EXT_D32);
224 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
225 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
226 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
227 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
228 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
229 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
230
231 static int mfloat_abi_opt = -1;
232 /* Record user cpu selection for object attributes. */
233 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
234 /* Must be long enough to hold any of the names in arm_cpus. */
235 static char selected_cpu_name[16];
236 #ifdef OBJ_ELF
237 # ifdef EABI_DEFAULT
238 static int meabi_flags = EABI_DEFAULT;
239 # else
240 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
241 # endif
242
243 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
244
245 bfd_boolean
246 arm_is_eabi (void)
247 {
248 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
249 }
250 #endif
251
252 #ifdef OBJ_ELF
253 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
254 symbolS * GOT_symbol;
255 #endif
256
257 /* 0: assemble for ARM,
258 1: assemble for Thumb,
259 2: assemble for Thumb even though target CPU does not support thumb
260 instructions. */
261 static int thumb_mode = 0;
262 /* A value distinct from the possible values for thumb_mode that we
263 can use to record whether thumb_mode has been copied into the
264 tc_frag_data field of a frag. */
265 #define MODE_RECORDED (1 << 4)
266
267 /* Specifies the intrinsic IT insn behavior mode. */
268 enum implicit_it_mode
269 {
270 IMPLICIT_IT_MODE_NEVER = 0x00,
271 IMPLICIT_IT_MODE_ARM = 0x01,
272 IMPLICIT_IT_MODE_THUMB = 0x02,
273 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
274 };
275 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
276
277 /* If unified_syntax is true, we are processing the new unified
278 ARM/Thumb syntax. Important differences from the old ARM mode:
279
280 - Immediate operands do not require a # prefix.
281 - Conditional affixes always appear at the end of the
282 instruction. (For backward compatibility, those instructions
283 that formerly had them in the middle, continue to accept them
284 there.)
285 - The IT instruction may appear, and if it does is validated
286 against subsequent conditional affixes. It does not generate
287 machine code.
288
289 Important differences from the old Thumb mode:
290
291 - Immediate operands do not require a # prefix.
292 - Most of the V6T2 instructions are only available in unified mode.
293 - The .N and .W suffixes are recognized and honored (it is an error
294 if they cannot be honored).
295 - All instructions set the flags if and only if they have an 's' affix.
296 - Conditional affixes may be used. They are validated against
297 preceding IT instructions. Unlike ARM mode, you cannot use a
298 conditional affix except in the scope of an IT instruction. */
299
300 static bfd_boolean unified_syntax = FALSE;
301
302 enum neon_el_type
303 {
304 NT_invtype,
305 NT_untyped,
306 NT_integer,
307 NT_float,
308 NT_poly,
309 NT_signed,
310 NT_unsigned
311 };
312
313 struct neon_type_el
314 {
315 enum neon_el_type type;
316 unsigned size;
317 };
318
319 #define NEON_MAX_TYPE_ELS 4
320
321 struct neon_type
322 {
323 struct neon_type_el el[NEON_MAX_TYPE_ELS];
324 unsigned elems;
325 };
326
327 enum it_instruction_type
328 {
329 OUTSIDE_IT_INSN,
330 INSIDE_IT_INSN,
331 INSIDE_IT_LAST_INSN,
332 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
333 if inside, should be the last one. */
334 NEUTRAL_IT_INSN, /* This could be either inside or outside,
335 i.e. BKPT and NOP. */
336 IT_INSN /* The IT insn has been parsed. */
337 };
338
/* The state of the one instruction currently being assembled: error
   status, parsed operands, requested encoding and relocation info.  */
struct arm_it
{
  /* Diagnostic message for the current instruction, or NULL
     (NOTE(review): NULL-means-no-error is inferred from
     my_get_expression, which only sets this on failure — confirm).  */
  const char *	error;
  /* The binary encoding being built up.  */
  unsigned long instruction;
  /* Instruction size in bytes, and any size explicitly requested by
     the source (NOTE(review): .n/.w suffix convention presumed —
     confirm against opcode_lookup).  */
  int		size;
  int		size_req;
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long	relax;
  /* Relocation to be emitted for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  /* Up to six parsed operands.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[6];
};

static struct arm_it inst;
394
395 #define NUM_FLOAT_VALS 8
396
397 const char * fp_const[] =
398 {
399 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
400 };
401
402 /* Number of littlenums required to hold an extended precision number. */
403 #define MAX_LITTLENUMS 6
404
405 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
406
407 #define FAIL (-1)
408 #define SUCCESS (0)
409
410 #define SUFF_S 1
411 #define SUFF_D 2
412 #define SUFF_E 3
413 #define SUFF_P 4
414
415 #define CP_T_X 0x00008000
416 #define CP_T_Y 0x00400000
417
418 #define CONDS_BIT 0x00100000
419 #define LOAD_BIT 0x00100000
420
421 #define DOUBLE_LOAD_FLAG 0x00000001
422
423 struct asm_cond
424 {
425 const char * template_name;
426 unsigned long value;
427 };
428
429 #define COND_ALWAYS 0xE
430
431 struct asm_psr
432 {
433 const char * template_name;
434 unsigned long field;
435 };
436
437 struct asm_barrier_opt
438 {
439 const char * template_name;
440 unsigned long value;
441 };
442
443 /* The bit that distinguishes CPSR and SPSR. */
444 #define SPSR_BIT (1 << 22)
445
446 /* The individual PSR flag bits. */
447 #define PSR_c (1 << 16)
448 #define PSR_x (1 << 17)
449 #define PSR_s (1 << 18)
450 #define PSR_f (1 << 19)
451
452 struct reloc_entry
453 {
454 char * name;
455 bfd_reloc_code_real_type reloc;
456 };
457
458 enum vfp_reg_pos
459 {
460 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
461 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
462 };
463
464 enum vfp_ldstm_type
465 {
466 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
467 };
468
469 /* Bits for DEFINED field in neon_typed_alias. */
470 #define NTA_HASTYPE 1
471 #define NTA_HASINDEX 2
472
473 struct neon_typed_alias
474 {
475 unsigned char defined;
476 unsigned char index;
477 struct neon_type_el eltype;
478 };
479
480 /* ARM register categories. This includes coprocessor numbers and various
481 architecture extensions' registers. */
482 enum arm_reg_type
483 {
484 REG_TYPE_RN,
485 REG_TYPE_CP,
486 REG_TYPE_CN,
487 REG_TYPE_FN,
488 REG_TYPE_VFS,
489 REG_TYPE_VFD,
490 REG_TYPE_NQ,
491 REG_TYPE_VFSD,
492 REG_TYPE_NDQ,
493 REG_TYPE_NSDQ,
494 REG_TYPE_VFC,
495 REG_TYPE_MVF,
496 REG_TYPE_MVD,
497 REG_TYPE_MVFX,
498 REG_TYPE_MVDX,
499 REG_TYPE_MVAX,
500 REG_TYPE_DSPSC,
501 REG_TYPE_MMXWR,
502 REG_TYPE_MMXWC,
503 REG_TYPE_MMXWCG,
504 REG_TYPE_XSCALE,
505 };
506
507 /* Structure for a hash table entry for a register.
508 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
509 information which states whether a vector type or index is specified (for a
510 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
511 struct reg_entry
512 {
513 const char * name;
514 unsigned char number;
515 unsigned char type;
516 unsigned char builtin;
517 struct neon_typed_alias * neon;
518 };
519
520 /* Diagnostics used when we don't get a register of the expected type. */
521 const char * const reg_expected_msgs[] =
522 {
523 N_("ARM register expected"),
524 N_("bad or missing co-processor number"),
525 N_("co-processor register expected"),
526 N_("FPA register expected"),
527 N_("VFP single precision register expected"),
528 N_("VFP/Neon double precision register expected"),
529 N_("Neon quad precision register expected"),
530 N_("VFP single or double precision register expected"),
531 N_("Neon double or quad precision register expected"),
532 N_("VFP single, double or Neon quad precision register expected"),
533 N_("VFP system register expected"),
534 N_("Maverick MVF register expected"),
535 N_("Maverick MVD register expected"),
536 N_("Maverick MVFX register expected"),
537 N_("Maverick MVDX register expected"),
538 N_("Maverick MVAX register expected"),
539 N_("Maverick DSPSC register expected"),
540 N_("iWMMXt data register expected"),
541 N_("iWMMXt control register expected"),
542 N_("iWMMXt scalar register expected"),
543 N_("XScale accumulator register expected"),
544 };
545
546 /* Some well known registers that we refer to directly elsewhere. */
547 #define REG_SP 13
548 #define REG_LR 14
549 #define REG_PC 15
550
551 /* ARM instructions take 4bytes in the object file, Thumb instructions
552 take 2: */
553 #define INSN_SIZE 4
554
555 struct asm_opcode
556 {
557 /* Basic string to match. */
558 const char * template_name;
559
560 /* Parameters to instruction. */
561 unsigned int operands[8];
562
563 /* Conditional tag - see opcode_lookup. */
564 unsigned int tag : 4;
565
566 /* Basic instruction code. */
567 unsigned int avalue : 28;
568
569 /* Thumb-format instruction code. */
570 unsigned int tvalue;
571
572 /* Which architecture variant provides this instruction. */
573 const arm_feature_set * avariant;
574 const arm_feature_set * tvariant;
575
576 /* Function to call to encode instruction in ARM format. */
577 void (* aencode) (void);
578
579 /* Function to call to encode instruction in Thumb format. */
580 void (* tencode) (void);
581 };
582
583 /* Defines for various bits that we will want to toggle. */
584 #define INST_IMMEDIATE 0x02000000
585 #define OFFSET_REG 0x02000000
586 #define HWOFFSET_IMM 0x00400000
587 #define SHIFT_BY_REG 0x00000010
588 #define PRE_INDEX 0x01000000
589 #define INDEX_UP 0x00800000
590 #define WRITE_BACK 0x00200000
591 #define LDM_TYPE_2_OR_3 0x00400000
592 #define CPSI_MMOD 0x00020000
593
594 #define LITERAL_MASK 0xf000f000
595 #define OPCODE_MASK 0xfe1fffff
596 #define V4_STR_BIT 0x00000020
597
598 #define T2_SUBS_PC_LR 0xf3de8f00
599
600 #define DATA_OP_SHIFT 21
601
602 #define T2_OPCODE_MASK 0xfe1fffff
603 #define T2_DATA_OP_SHIFT 21
604
605 /* Codes to distinguish the arithmetic instructions. */
606 #define OPCODE_AND 0
607 #define OPCODE_EOR 1
608 #define OPCODE_SUB 2
609 #define OPCODE_RSB 3
610 #define OPCODE_ADD 4
611 #define OPCODE_ADC 5
612 #define OPCODE_SBC 6
613 #define OPCODE_RSC 7
614 #define OPCODE_TST 8
615 #define OPCODE_TEQ 9
616 #define OPCODE_CMP 10
617 #define OPCODE_CMN 11
618 #define OPCODE_ORR 12
619 #define OPCODE_MOV 13
620 #define OPCODE_BIC 14
621 #define OPCODE_MVN 15
622
623 #define T2_OPCODE_AND 0
624 #define T2_OPCODE_BIC 1
625 #define T2_OPCODE_ORR 2
626 #define T2_OPCODE_ORN 3
627 #define T2_OPCODE_EOR 4
628 #define T2_OPCODE_ADD 8
629 #define T2_OPCODE_ADC 10
630 #define T2_OPCODE_SBC 11
631 #define T2_OPCODE_SUB 13
632 #define T2_OPCODE_RSB 14
633
634 #define T_OPCODE_MUL 0x4340
635 #define T_OPCODE_TST 0x4200
636 #define T_OPCODE_CMN 0x42c0
637 #define T_OPCODE_NEG 0x4240
638 #define T_OPCODE_MVN 0x43c0
639
640 #define T_OPCODE_ADD_R3 0x1800
641 #define T_OPCODE_SUB_R3 0x1a00
642 #define T_OPCODE_ADD_HI 0x4400
643 #define T_OPCODE_ADD_ST 0xb000
644 #define T_OPCODE_SUB_ST 0xb080
645 #define T_OPCODE_ADD_SP 0xa800
646 #define T_OPCODE_ADD_PC 0xa000
647 #define T_OPCODE_ADD_I8 0x3000
648 #define T_OPCODE_SUB_I8 0x3800
649 #define T_OPCODE_ADD_I3 0x1c00
650 #define T_OPCODE_SUB_I3 0x1e00
651
652 #define T_OPCODE_ASR_R 0x4100
653 #define T_OPCODE_LSL_R 0x4080
654 #define T_OPCODE_LSR_R 0x40c0
655 #define T_OPCODE_ROR_R 0x41c0
656 #define T_OPCODE_ASR_I 0x1000
657 #define T_OPCODE_LSL_I 0x0000
658 #define T_OPCODE_LSR_I 0x0800
659
660 #define T_OPCODE_MOV_I8 0x2000
661 #define T_OPCODE_CMP_I8 0x2800
662 #define T_OPCODE_CMP_LR 0x4280
663 #define T_OPCODE_MOV_HR 0x4600
664 #define T_OPCODE_CMP_HR 0x4500
665
666 #define T_OPCODE_LDR_PC 0x4800
667 #define T_OPCODE_LDR_SP 0x9800
668 #define T_OPCODE_STR_SP 0x9000
669 #define T_OPCODE_LDR_IW 0x6800
670 #define T_OPCODE_STR_IW 0x6000
671 #define T_OPCODE_LDR_IH 0x8800
672 #define T_OPCODE_STR_IH 0x8000
673 #define T_OPCODE_LDR_IB 0x7800
674 #define T_OPCODE_STR_IB 0x7000
675 #define T_OPCODE_LDR_RW 0x5800
676 #define T_OPCODE_STR_RW 0x5000
677 #define T_OPCODE_LDR_RH 0x5a00
678 #define T_OPCODE_STR_RH 0x5200
679 #define T_OPCODE_LDR_RB 0x5c00
680 #define T_OPCODE_STR_RB 0x5400
681
682 #define T_OPCODE_PUSH 0xb400
683 #define T_OPCODE_POP 0xbc00
684
685 #define T_OPCODE_BRANCH 0xe000
686
687 #define THUMB_SIZE 2 /* Size of thumb instruction. */
688 #define THUMB_PP_PC_LR 0x0100
689 #define THUMB_LOAD_BIT 0x0800
690 #define THUMB2_LOAD_BIT 0x00100000
691
/* Common diagnostic strings.  Each macro must expand to a bare
   expression: BAD_ADDR_MODE previously ended in a stray semicolon,
   which breaks any use inside an expression or conditional (e.g.
   "x ? BAD_ADDR_MODE : y") and silently adds an empty statement in
   "inst.error = BAD_ADDR_MODE;".  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
711
712 static struct hash_control * arm_ops_hsh;
713 static struct hash_control * arm_cond_hsh;
714 static struct hash_control * arm_shift_hsh;
715 static struct hash_control * arm_psr_hsh;
716 static struct hash_control * arm_v7m_psr_hsh;
717 static struct hash_control * arm_reg_hsh;
718 static struct hash_control * arm_reloc_hsh;
719 static struct hash_control * arm_barrier_opt_hsh;
720
721 /* Stuff needed to resolve the label ambiguity
722 As:
723 ...
724 label: <insn>
725 may differ from:
726 ...
727 label:
728 <insn> */
729
730 symbolS * last_label_seen;
731 static int label_is_thumb_function_name = FALSE;
732
733 /* Literal pool structure. Held on a per-section
734 and per-sub-section basis. */
735
736 #define MAX_LITERAL_POOL_SIZE 1024
737 typedef struct literal_pool
738 {
739 expressionS literals [MAX_LITERAL_POOL_SIZE];
740 unsigned int next_free_entry;
741 unsigned int id;
742 symbolS * symbol;
743 segT section;
744 subsegT sub_section;
745 struct literal_pool * next;
746 } literal_pool;
747
748 /* Pointer to a linked list of literal pools. */
749 literal_pool * list_of_pools = NULL;
750
751 #ifdef OBJ_ELF
752 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
753 #else
754 static struct current_it now_it;
755 #endif
756
757 static inline int
758 now_it_compatible (int cond)
759 {
760 return (cond & ~1) == (now_it.cc & ~1);
761 }
762
763 static inline int
764 conditional_insn (void)
765 {
766 return inst.cond != COND_ALWAYS;
767 }
768
769 static int in_it_block (void);
770
771 static int handle_it_state (void);
772
773 static void force_automatic_it_block_close (void);
774
775 static void it_fsm_post_encode (void);
776
777 #define set_it_insn_type(type) \
778 do \
779 { \
780 inst.it_insn_type = type; \
781 if (handle_it_state () == FAIL) \
782 return; \
783 } \
784 while (0)
785
786 #define set_it_insn_type_nonvoid(type, failret) \
787 do \
788 { \
789 inst.it_insn_type = type; \
790 if (handle_it_state () == FAIL) \
791 return failret; \
792 } \
793 while(0)
794
795 #define set_it_insn_type_last() \
796 do \
797 { \
798 if (inst.cond == COND_ALWAYS) \
799 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
800 else \
801 set_it_insn_type (INSIDE_IT_LAST_INSN); \
802 } \
803 while (0)
804
805 /* Pure syntax. */
806
807 /* This array holds the chars that always start a comment. If the
808 pre-processor is disabled, these aren't very useful. */
809 const char comment_chars[] = "@";
810
811 /* This array holds the chars that only start a comment at the beginning of
812 a line. If the line seems to have the form '# 123 filename'
813 .line and .file directives will appear in the pre-processed output. */
814 /* Note that input_file.c hand checks for '#' at the beginning of the
815 first line of the input file. This is because the compiler outputs
816 #NO_APP at the beginning of its output. */
817 /* Also note that comments like this one will always work. */
818 const char line_comment_chars[] = "#";
819
820 const char line_separator_chars[] = ";";
821
822 /* Chars that can be used to separate mant
823 from exp in floating point numbers. */
824 const char EXP_CHARS[] = "eE";
825
826 /* Chars that mean this number is a floating point constant. */
827 /* As in 0f12.456 */
828 /* or 0d1.2345e12 */
829
830 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
831
832 /* Prefix characters that indicate the start of an immediate
833 value. */
834 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
835
836 /* Separator character handling. */
837
838 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
839
840 static inline int
841 skip_past_char (char ** str, char c)
842 {
843 if (**str == c)
844 {
845 (*str)++;
846 return SUCCESS;
847 }
848 else
849 return FAIL;
850 }
851
852 #define skip_past_comma(str) skip_past_char (str, ',')
853
854 /* Arithmetic expressions (possibly involving symbols). */
855
856 /* Return TRUE if anything in the expression is a bignum. */
857
858 static int
859 walk_no_bignums (symbolS * sp)
860 {
861 if (symbol_get_value_expression (sp)->X_op == O_big)
862 return 1;
863
864 if (symbol_get_value_expression (sp)->X_add_symbol)
865 {
866 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
867 || (symbol_get_value_expression (sp)->X_op_symbol
868 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
869 }
870
871 return 0;
872 }
873
874 static int in_my_get_expression = 0;
875
876 /* Third argument to my_get_expression. */
877 #define GE_NO_PREFIX 0
878 #define GE_IMM_PREFIX 1
879 #define GE_OPT_PREFIX 2
880 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
881 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
882 #define GE_OPT_PREFIX_BIG 3
883
/* Parse an expression from *STR into EP, applying the immediate-prefix
   policy PREFIX_MODE (one of the GE_* values above).  On success return
   0 and advance *STR past the parsed text.  On failure set inst.error
   (unless already set), leave *STR at the failure point and return
   nonzero.  Bignums are rejected here, except in GE_OPT_PREFIX_BIG
   mode.  Temporarily redirects the global input_line_pointer so the
   generic expression () machinery can be used.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT	 seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  /* NOTE(review): this path returns FAIL (-1) while the other
	     failure paths return 1; callers presumably only test for
	     nonzero — confirm.  */
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic parser at our string; in_my_get_expression lets
     md_operand know the context of the call.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: report how far we consumed and restore the global.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
967
968 /* Turn a string in input_line_pointer into a floating point constant
969 of type TYPE, and store the appropriate bytes in *LITP. The number
970 of LITTLENUMS emitted is stored in *SIZEP. An error message is
971 returned, or NULL on OK.
972
973 Note that fp constants aren't represent in the normal way on the ARM.
974 In big endian mode, things are as expected. However, in little endian
975 mode fp constants are big-endian word-wise, and little-endian byte-wise
976 within the words. For example, (double) 1.1 in big endian mode is
977 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
978 the byte sequence 99 99 f1 3f 9a 99 99 99.
979
980 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
981
982 char *
983 md_atof (int type, char * litP, int * sizeP)
984 {
985 int prec;
986 LITTLENUM_TYPE words[MAX_LITTLENUMS];
987 char *t;
988 int i;
989
990 switch (type)
991 {
992 case 'f':
993 case 'F':
994 case 's':
995 case 'S':
996 prec = 2;
997 break;
998
999 case 'd':
1000 case 'D':
1001 case 'r':
1002 case 'R':
1003 prec = 4;
1004 break;
1005
1006 case 'x':
1007 case 'X':
1008 prec = 5;
1009 break;
1010
1011 case 'p':
1012 case 'P':
1013 prec = 5;
1014 break;
1015
1016 default:
1017 *sizeP = 0;
1018 return _("Unrecognized or unsupported floating point constant");
1019 }
1020
1021 t = atof_ieee (input_line_pointer, type, words);
1022 if (t)
1023 input_line_pointer = t;
1024 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1025
1026 if (target_big_endian)
1027 {
1028 for (i = 0; i < prec; i++)
1029 {
1030 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1031 litP += sizeof (LITTLENUM_TYPE);
1032 }
1033 }
1034 else
1035 {
1036 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1037 for (i = prec - 1; i >= 0; i--)
1038 {
1039 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1040 litP += sizeof (LITTLENUM_TYPE);
1041 }
1042 else
1043 /* For a 4 byte float the order of elements in `words' is 1 0.
1044 For an 8 byte float the order is 1 0 3 2. */
1045 for (i = 0; i < prec; i += 2)
1046 {
1047 md_number_to_chars (litP, (valueT) words[i + 1],
1048 sizeof (LITTLENUM_TYPE));
1049 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1050 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1051 litP += 2 * sizeof (LITTLENUM_TYPE);
1052 }
1053 }
1054
1055 return NULL;
1056 }
1057
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  The expression parser calls this
   hook when it cannot make sense of its input.  */
void
md_operand (expressionS * exp)
{
  /* Only flag the failure when the call originated from
     my_get_expression; that caller checks for O_illegal and records
     the error against the current instruction.  */
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}
1066
1067 /* Immediate values. */
1068
1069 /* Generic immediate-value read function for use in directives.
1070 Accepts anything that 'expression' can fold to a constant.
1071 *val receives the number. */
1072 #ifdef OBJ_ELF
1073 static int
1074 immediate_for_directive (int *val)
1075 {
1076 expressionS exp;
1077 exp.X_op = O_illegal;
1078
1079 if (is_immediate_prefix (*input_line_pointer))
1080 {
1081 input_line_pointer++;
1082 expression (&exp);
1083 }
1084
1085 if (exp.X_op != O_constant)
1086 {
1087 as_bad (_("expected #constant"));
1088 ignore_rest_of_line ();
1089 return FAIL;
1090 }
1091 *val = exp.X_add_number;
1092 return SUCCESS;
1093 }
1094 #endif
1095
1096 /* Register parsing. */
1097
1098 /* Generic register parser. CCP points to what should be the
1099 beginning of a register name. If it is indeed a valid register
1100 name, advance CCP over it and return the reg_entry structure;
1101 otherwise return NULL. Does not issue diagnostics. */
1102
1103 static struct reg_entry *
1104 arm_reg_parse_multi (char **ccp)
1105 {
1106 char *start = *ccp;
1107 char *p;
1108 struct reg_entry *reg;
1109
1110 #ifdef REGISTER_PREFIX
1111 if (*start != REGISTER_PREFIX)
1112 return NULL;
1113 start++;
1114 #endif
1115 #ifdef OPTIONAL_REGISTER_PREFIX
1116 if (*start == OPTIONAL_REGISTER_PREFIX)
1117 start++;
1118 #endif
1119
1120 p = start;
1121 if (!ISALPHA (*p) || !is_name_beginner (*p))
1122 return NULL;
1123
1124 do
1125 p++;
1126 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1127
1128 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1129
1130 if (!reg)
1131 return NULL;
1132
1133 *ccp = p;
1134 return reg;
1135 }
1136
/* Accept alternative spellings for a register of type TYPE at START.
   REG is the entry found by the generic parser (may be NULL).  Returns
   the register number, or FAIL if no alternative syntax applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): no break here -- control falls through into the
	 REG_TYPE_MMXWC case.  Harmless as written (a CP-typed request
	 cannot match an MMXWCG entry), but confirm the fall-through is
	 intentional.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1174
1175 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1176 return value is the register number or FAIL. */
1177
1178 static int
1179 arm_reg_parse (char **ccp, enum arm_reg_type type)
1180 {
1181 char *start = *ccp;
1182 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1183 int ret;
1184
1185 /* Do not allow a scalar (reg+index) to parse as a register. */
1186 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1187 return FAIL;
1188
1189 if (reg && reg->type == type)
1190 return reg->number;
1191
1192 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1193 return ret;
1194
1195 *ccp = start;
1196 return FAIL;
1197 }
1198
1199 /* Parse a Neon type specifier. *STR should point at the leading '.'
1200 character. Does no verification at this stage that the type fits the opcode
1201 properly. E.g.,
1202
1203 .i32.i32.s16
1204 .s32.f32
1205 .u16
1206
1207 Can all be legally parsed by this function.
1208
1209 Fills in neon_type struct pointer with parsed information, and updates STR
1210 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1211 type, FAIL if not. */
1212
1213 static int
1214 parse_neon_type (struct neon_type *type, char **str)
1215 {
1216 char *ptr = *str;
1217
1218 if (type)
1219 type->elems = 0;
1220
1221 while (type->elems < NEON_MAX_TYPE_ELS)
1222 {
1223 enum neon_el_type thistype = NT_untyped;
1224 unsigned thissize = -1u;
1225
1226 if (*ptr != '.')
1227 break;
1228
1229 ptr++;
1230
1231 /* Just a size without an explicit type. */
1232 if (ISDIGIT (*ptr))
1233 goto parsesize;
1234
1235 switch (TOLOWER (*ptr))
1236 {
1237 case 'i': thistype = NT_integer; break;
1238 case 'f': thistype = NT_float; break;
1239 case 'p': thistype = NT_poly; break;
1240 case 's': thistype = NT_signed; break;
1241 case 'u': thistype = NT_unsigned; break;
1242 case 'd':
1243 thistype = NT_float;
1244 thissize = 64;
1245 ptr++;
1246 goto done;
1247 default:
1248 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1249 return FAIL;
1250 }
1251
1252 ptr++;
1253
1254 /* .f is an abbreviation for .f32. */
1255 if (thistype == NT_float && !ISDIGIT (*ptr))
1256 thissize = 32;
1257 else
1258 {
1259 parsesize:
1260 thissize = strtoul (ptr, &ptr, 10);
1261
1262 if (thissize != 8 && thissize != 16 && thissize != 32
1263 && thissize != 64)
1264 {
1265 as_bad (_("bad size %d in type specifier"), thissize);
1266 return FAIL;
1267 }
1268 }
1269
1270 done:
1271 if (type)
1272 {
1273 type->el[type->elems].type = thistype;
1274 type->el[type->elems].size = thissize;
1275 type->elems++;
1276 }
1277 }
1278
1279 /* Empty/missing type is not a successful parse. */
1280 if (type->elems == 0)
1281 return FAIL;
1282
1283 *str = ptr;
1284
1285 return SUCCESS;
1286 }
1287
1288 /* Errors may be set multiple times during parsing or bit encoding
1289 (particularly in the Neon bits), but usually the earliest error which is set
1290 will be the most meaningful. Avoid overwriting it with later (cascading)
1291 errors by calling this function. */
1292
1293 static void
1294 first_error (const char *err)
1295 {
1296 if (!inst.error)
1297 inst.error = err;
1298 }
1299
1300 /* Parse a single type, e.g. ".s32", leading period included. */
1301 static int
1302 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1303 {
1304 char *str = *ccp;
1305 struct neon_type optype;
1306
1307 if (*str == '.')
1308 {
1309 if (parse_neon_type (&optype, &str) == SUCCESS)
1310 {
1311 if (optype.elems == 1)
1312 *vectype = optype.el[0];
1313 else
1314 {
1315 first_error (_("only one type should be specified for operand"));
1316 return FAIL;
1317 }
1318 }
1319 else
1320 {
1321 first_error (_("vector type expected"));
1322 return FAIL;
1323 }
1324 }
1325 else
1326 return FAIL;
1327
1328 *ccp = str;
1329
1330 return SUCCESS;
1331 }
1332
1333 /* Special meanings for indices (which have a range of 0-7), which will fit into
1334 a 4-bit integer. */
1335
1336 #define NEON_ALL_LANES 15
1337 #define NEON_INTERLEAVE_LANES 14
1338
1339 /* Parse either a register or a scalar, with an optional type. Return the
1340 register number, and optionally fill in the actual type of the register
1341 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1342 type/index information in *TYPEINFO. */
1343
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index description.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      /* Only consume input when the alternate syntax matched.  */
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by a .dn/.qn
     alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix must not clash with a type already
     supplied by an alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" selects all lanes, "[n]" one lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1447
1448 /* Like arm_reg_parse, but allow allow the following extra features:
1449 - If RTYPE is non-zero, return the (possibly restricted) type of the
1450 register (e.g. Neon double or quad reg when either has been requested).
1451 - If this is a Neon vector type with additional type information, fill
1452 in the struct pointed to by VECTYPE (if non-NULL).
1453 This function will fault on encountering a scalar. */
1454
1455 static int
1456 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1457 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1458 {
1459 struct neon_typed_alias atype;
1460 char *str = *ccp;
1461 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1462
1463 if (reg == FAIL)
1464 return FAIL;
1465
1466 /* Do not allow a scalar (reg+index) to parse as a register. */
1467 if ((atype.defined & NTA_HASINDEX) != 0)
1468 {
1469 first_error (_("register operand expected, but got scalar"));
1470 return FAIL;
1471 }
1472
1473 if (vectype)
1474 *vectype = atype.eltype;
1475
1476 *ccp = str;
1477
1478 return reg;
1479 }
1480
1481 #define NEON_SCALAR_REG(X) ((X) >> 4)
1482 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1483
1484 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1485 have enough information to be able to do a good job bounds-checking. So, we
1486 just do easy checks here, and do further checks later. */
1487
1488 static int
1489 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1490 {
1491 int reg;
1492 char *str = *ccp;
1493 struct neon_typed_alias atype;
1494
1495 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1496
1497 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1498 return FAIL;
1499
1500 if (atype.index == NEON_ALL_LANES)
1501 {
1502 first_error (_("scalar must have an index"));
1503 return FAIL;
1504 }
1505 else if (atype.index >= 64 / elsize)
1506 {
1507 first_error (_("scalar index out of range"));
1508 return FAIL;
1509 }
1510
1511 if (type)
1512 *type = atype.eltype;
1513
1514 *ccp = str;
1515
1516 return reg * 16 + atype.index;
1517 }
1518
1519 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1520
1521 static long
1522 parse_reg_list (char ** strp)
1523 {
1524 char * str = * strp;
1525 long range = 0;
1526 int another_range;
1527
1528 /* We come back here if we get ranges concatenated by '+' or '|'. */
1529 do
1530 {
1531 another_range = 0;
1532
1533 if (*str == '{')
1534 {
1535 int in_range = 0;
1536 int cur_reg = -1;
1537
1538 str++;
1539 do
1540 {
1541 int reg;
1542
1543 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1544 {
1545 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1546 return FAIL;
1547 }
1548
1549 if (in_range)
1550 {
1551 int i;
1552
1553 if (reg <= cur_reg)
1554 {
1555 first_error (_("bad range in register list"));
1556 return FAIL;
1557 }
1558
1559 for (i = cur_reg + 1; i < reg; i++)
1560 {
1561 if (range & (1 << i))
1562 as_tsktsk
1563 (_("Warning: duplicated register (r%d) in register list"),
1564 i);
1565 else
1566 range |= 1 << i;
1567 }
1568 in_range = 0;
1569 }
1570
1571 if (range & (1 << reg))
1572 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1573 reg);
1574 else if (reg <= cur_reg)
1575 as_tsktsk (_("Warning: register range not in ascending order"));
1576
1577 range |= 1 << reg;
1578 cur_reg = reg;
1579 }
1580 while (skip_past_comma (&str) != FAIL
1581 || (in_range = 1, *str++ == '-'));
1582 str--;
1583
1584 if (*str++ != '}')
1585 {
1586 first_error (_("missing `}'"));
1587 return FAIL;
1588 }
1589 }
1590 else
1591 {
1592 expressionS exp;
1593
1594 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1595 return FAIL;
1596
1597 if (exp.X_op == O_constant)
1598 {
1599 if (exp.X_add_number
1600 != (exp.X_add_number & 0x0000ffff))
1601 {
1602 inst.error = _("invalid register mask");
1603 return FAIL;
1604 }
1605
1606 if ((range & exp.X_add_number) != 0)
1607 {
1608 int regno = range & exp.X_add_number;
1609
1610 regno &= -regno;
1611 regno = (1 << regno) - 1;
1612 as_tsktsk
1613 (_("Warning: duplicated register (r%d) in register list"),
1614 regno);
1615 }
1616
1617 range |= exp.X_add_number;
1618 }
1619 else
1620 {
1621 if (inst.reloc.type != 0)
1622 {
1623 inst.error = _("expression too complex");
1624 return FAIL;
1625 }
1626
1627 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1628 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1629 inst.reloc.pc_rel = 0;
1630 }
1631 }
1632
1633 if (*str == '|' || *str == '+')
1634 {
1635 str++;
1636 another_range = 1;
1637 }
1638 }
1639 while (another_range);
1640
1641 *strp = str;
1642 return range;
1643 }
1644
1645 /* Types of registers in a list. */
1646
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (s0-s31).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers (16 or 32,
			   depending on the D32 extension).  */
  REGLIST_NEON_D	/* Neon D registers, with syntax extensions
			   (Q registers stand for D pairs).  */
};
1653
1654 /* Parse a VFP register list. If the string is invalid return FAIL.
1655 Otherwise return the number of registers, and set PBASE to the first
1656 register. Parses registers of type ETYPE.
1657 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1658 - Q registers can be used to specify pairs of D registers
1659 - { } can be omitted from around a singleton register list
1660 FIXME: This is not implemented, as it would require backtracking in
1661 some cases, e.g.:
1662 vtbl.8 d3,d4,d5
1663 This could be done (the meaning isn't really ambiguous), but doesn't
1664 fit in well with the current parsing framework.
1665 - 32 D registers may be used (also true for VFPv3).
1666 FIXME: Types are ignored in these register lists, which is probably a
1667 bug. */
1668
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Select the register type to parse.  max_regs for the D-register
     flavours is filled in below since it depends on the CPU.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start the running minimum above any valid register number.  */
  base_reg = max_regs;

  do
    {
      /* setmask/addregs account for Q registers covering two D
	 registers each.  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound spans two D registers.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add every register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this consumes the terminating character without
     verifying that it is '}' -- confirm a malformed list cannot reach
     this point.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1834
1835 /* True if two alias types are the same. */
1836
1837 static bfd_boolean
1838 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1839 {
1840 if (!a && !b)
1841 return TRUE;
1842
1843 if (!a || !b)
1844 return FALSE;
1845
1846 if (a->defined != b->defined)
1847 return FALSE;
1848
1849 if ((a->defined & NTA_HASTYPE) != 0
1850 && (a->eltype.type != b->eltype.type
1851 || a->eltype.size != b->eltype.size))
1852 return FALSE;
1853
1854 if ((a->defined & NTA_HASINDEX) != 0
1855 && (a->index != b->index))
1856 return FALSE;
1857
1858 return TRUE;
1859 }
1860
1861 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1862 The base register is put in *PBASE.
1863 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1864 the return value.
1865 The register stride (minus one) is put in bit 4 of the return value.
1866 Bits [6:5] encode the list length (minus one).
1867 The type of the list elements is put in *ELTYPE, if non-NULL. */
1868
1869 #define NEON_LANE(X) ((X) & 0xf)
1870 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1871 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1872
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  /* The braces are optional around a single-register list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: establishes the base, the D/Q flavour and
	     the reference type for the rest of the list.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range (Q registers count as
	     two D registers).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed registers in one list must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane (bits [3:0]), stride minus one (bit 4) and length minus
     one (bits [6:5]) into the return value.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2027
2028 /* Parse an explicit relocation suffix on an expression. This is
2029 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2030 arm_reloc_hsh contains no entries, so this function can only
2031 succeed if there is no () after the word. Returns -1 on error,
2032 BFD_RELOC_UNUSED if there wasn't any suffix. */
2033 static int
2034 parse_reloc (char **str)
2035 {
2036 struct reloc_entry *r;
2037 char *p, *q;
2038
2039 if (**str != '(')
2040 return BFD_RELOC_UNUSED;
2041
2042 p = *str + 1;
2043 q = p;
2044
2045 while (*q && *q != ')' && *q != ',')
2046 q++;
2047 if (*q != ')')
2048 return -1;
2049
2050 if ((r = (struct reloc_entry *)
2051 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2052 return -1;
2053
2054 *str = q + 1;
2055 return r->reloc;
2056 }
2057
2058 /* Directives: register aliases. */
2059
2060 static struct reg_entry *
2061 insert_reg_alias (char *str, int number, int type)
2062 {
2063 struct reg_entry *new_reg;
2064 const char *name;
2065
2066 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2067 {
2068 if (new_reg->builtin)
2069 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2070
2071 /* Only warn about a redefinition if it's not defined as the
2072 same register. */
2073 else if (new_reg->number != number || new_reg->type != type)
2074 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2075
2076 return NULL;
2077 }
2078
2079 name = xstrdup (str);
2080 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2081
2082 new_reg->name = name;
2083 new_reg->number = number;
2084 new_reg->type = type;
2085 new_reg->builtin = FALSE;
2086 new_reg->neon = NULL;
2087
2088 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2089 abort ();
2090
2091 return new_reg;
2092 }
2093
2094 static void
2095 insert_neon_reg_alias (char *str, int number, int type,
2096 struct neon_typed_alias *atype)
2097 {
2098 struct reg_entry *reg = insert_reg_alias (str, number, type);
2099
2100 if (!reg)
2101 {
2102 first_error (_("attempt to redefine typed alias"));
2103 return;
2104 }
2105
2106 if (atype)
2107 {
2108 reg->neon = (struct neon_typed_alias *)
2109 xmalloc (sizeof (struct neon_typed_alias));
2110 *reg->neon = *atype;
2111 }
2112 }
2113
2114 /* Look for the .req directive. This is of the form:
2115
2116 new_register_name .req existing_register_name
2117
2118 If we find one, or if it looks sufficiently like one that we want to
2119 handle any error here, return TRUE. Otherwise return FALSE. */
2120
2121 static bfd_boolean
2122 create_register_alias (char * newname, char *p)
2123 {
2124 struct reg_entry *old;
2125 char *oldname, *nbuf;
2126 size_t nlen;
2127
2128 /* The input scrubber ensures that whitespace after the mnemonic is
2129 collapsed to single spaces. */
2130 oldname = p;
2131 if (strncmp (oldname, " .req ", 6) != 0)
2132 return FALSE;
2133
2134 oldname += 6;
2135 if (*oldname == '\0')
2136 return FALSE;
2137
2138 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2139 if (!old)
2140 {
2141 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2142 return TRUE;
2143 }
2144
2145 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2146 the desired alias name, and p points to its end. If not, then
2147 the desired alias name is in the global original_case_string. */
2148 #ifdef TC_CASE_SENSITIVE
2149 nlen = p - newname;
2150 #else
2151 newname = original_case_string;
2152 nlen = strlen (newname);
2153 #endif
2154
2155 nbuf = (char *) alloca (nlen + 1);
2156 memcpy (nbuf, newname, nlen);
2157 nbuf[nlen] = '\0';
2158
2159 /* Create aliases under the new name as stated; an all-lowercase
2160 version of the new name; and an all-uppercase version of the new
2161 name. */
2162 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2163 {
2164 for (p = nbuf; *p; p++)
2165 *p = TOUPPER (*p);
2166
2167 if (strncmp (nbuf, newname, nlen))
2168 {
2169 /* If this attempt to create an additional alias fails, do not bother
2170 trying to create the all-lower case alias. We will fail and issue
2171 a second, duplicate error message. This situation arises when the
2172 programmer does something like:
2173 foo .req r0
2174 Foo .req r1
2175 The second .req creates the "Foo" alias but then fails to create
2176 the artificial FOO alias because it has already been created by the
2177 first .req. */
2178 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2179 return TRUE;
2180 }
2181
2182 for (p = nbuf; *p; p++)
2183 *p = TOLOWER (*p);
2184
2185 if (strncmp (nbuf, newname, nlen))
2186 insert_reg_alias (nbuf, old->number, old->type);
2187 }
2188
2189 return TRUE;
2190 }
2191
2192 /* Create a Neon typed/indexed register alias using directives, e.g.:
2193 X .dn d5.s32[1]
2194 Y .qn 6.s16
2195 Z .dn d7
2196 T .dn Z[0]
2197 These typed registers can be used instead of the types specified after the
2198 Neon mnemonic, so long as all operands given have types. Types can also be
2199 specified directly, e.g.:
2200 vadd d0.s32, d1.s32, d2.s32 */
2201
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* Start with no type or index information recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  /* NEWNAME runs up to P; remember the end so we can compute the
     alias name length once parsing has succeeded.  */
  nameend = p;

  /* The directive spelling selects the base register class:
     .dn -> double (D) registers, .qn -> quad (Q) registers.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in D-register units, hence the
	 doubling for REG_TYPE_NQ.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* If the base register is itself a typed alias, inherit its
     type/index so that e.g. "T .dn Z[0]" picks up Z's element type.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* Copy the alias name into a NUL-terminated buffer.  */
  namelen = nameend - newname;
  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2332
/* Handler for a bare ".req" at the start of a line.  Should never be
   called, as .req goes between the alias and the register name, not at
   the beginning of the line (alias creation is handled when the label
   is parsed); reaching here means the directive was misplaced.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2341
/* Likewise for a misplaced ".dn": the directive must follow the alias
   name, so a line-initial .dn is a syntax error.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2347
/* Likewise for a misplaced ".qn".  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2353
2354 /* The .unreq directive deletes an alias which was previously defined
2355 by .req. For example:
2356
2357 my_alias .req r11
2358 .unreq my_alias */
2359
2360 static void
2361 s_unreq (int a ATTRIBUTE_UNUSED)
2362 {
2363 char * name;
2364 char saved_char;
2365
2366 name = input_line_pointer;
2367
2368 while (*input_line_pointer != 0
2369 && *input_line_pointer != ' '
2370 && *input_line_pointer != '\n')
2371 ++input_line_pointer;
2372
2373 saved_char = *input_line_pointer;
2374 *input_line_pointer = 0;
2375
2376 if (!*name)
2377 as_bad (_("invalid syntax for .unreq directive"));
2378 else
2379 {
2380 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2381 name);
2382
2383 if (!reg)
2384 as_bad (_("unknown register alias '%s'"), name);
2385 else if (reg->builtin)
2386 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2387 name);
2388 else
2389 {
2390 char * p;
2391 char * nbuf;
2392
2393 hash_delete (arm_reg_hsh, name, FALSE);
2394 free ((char *) reg->name);
2395 if (reg->neon)
2396 free (reg->neon);
2397 free (reg);
2398
2399 /* Also locate the all upper case and all lower case versions.
2400 Do not complain if we cannot find one or the other as it
2401 was probably deleted above. */
2402
2403 nbuf = strdup (name);
2404 for (p = nbuf; *p; p++)
2405 *p = TOUPPER (*p);
2406 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2407 if (reg)
2408 {
2409 hash_delete (arm_reg_hsh, nbuf, FALSE);
2410 free ((char *) reg->name);
2411 if (reg->neon)
2412 free (reg->neon);
2413 free (reg);
2414 }
2415
2416 for (p = nbuf; *p; p++)
2417 *p = TOLOWER (*p);
2418 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2419 if (reg)
2420 {
2421 hash_delete (arm_reg_hsh, nbuf, FALSE);
2422 free ((char *) reg->name);
2423 if (reg->neon)
2424 free (reg->neon);
2425 free (reg);
2426 }
2427
2428 free (nbuf);
2429 }
2430 }
2431
2432 *input_line_pointer = saved_char;
2433 demand_empty_rest_of_line ();
2434 }
2435
2436 /* Directives: Instruction set selection. */
2437
2438 #ifdef OBJ_ELF
2439 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2440 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2441 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2442 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2443
/* Create a new mapping symbol ($a, $t or $d) for the transition to
   STATE, placed at offset VALUE within FRAG, and record it in the
   frag's first_map/last_map bookkeeping.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  /* Mapping symbols are local and, per the note above, untyped.  */
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Tag code mapping symbols with the appropriate ARM/Thumb
     annotations; $d gets none.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.  */
  if (value == 0)
    {
      know (frag->tc_frag_data.first_map == NULL);
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* If .fill or other data filling directive generates zero sized data,
	 the mapping symbol for the following code will have the same value
	 as the one generated for the data filling directive.  In this case,
	 we replace the old symbol with the new one at the same address.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2514
2515 /* We must sometimes convert a region marked as code to data during
2516 code alignment, if an odd number of bytes have to be padded. The
2517 code mapping symbol is pushed to an aligned address. */
2518
/* Insert a $d mapping symbol at offset VALUE in FRAG, followed by a
   STATE ($a/$t) mapping symbol BYTES further on, marking the padding
   as data and resuming code after it.  Any existing mapping symbol at
   VALUE is removed first so it is not duplicated.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* Keep first_map consistent: a symbol at offset 0 must also be
	 dropped from first_map before it is removed.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2541
2542 static void mapping_state_2 (enum mstate state, int max_chars);
2543
2544 /* Set the mapping state to STATE. Only call this when about to
2545 emit some STATE bytes to the file. */
2546
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	   || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Anything emitted before the first code mapping symbol was
	 data, so mark the start of the section as $d.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and emit its mapping symbol here.  */
  mapping_state_2 (state, 0);
#undef TRANSITION
}
2578
2579 /* Same as mapping_state, but MAX_CHARS bytes have already been
2580 allocated. Put the mapping symbol that far back. */
2581
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols only make sense in ordinary (normal) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* MAX_CHARS bytes were already allocated by the caller; back the
     symbol up so it sits at the start of that data.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2598 #else
2599 #define mapping_state(x) ((void)0)
2600 #define mapping_state_2(x, y) ((void)0)
2601 #endif
2602
2603 /* Find the real, Thumb encoded start of a Thumb function. */
2604
2605 #ifdef OBJ_COFF
2606 static symbolS *
2607 find_real_start (symbolS * symbolP)
2608 {
2609 char * real_start;
2610 const char * name = S_GET_NAME (symbolP);
2611 symbolS * new_target;
2612
2613 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2614 #define STUB_NAME ".real_start_of"
2615
2616 if (name == NULL)
2617 abort ();
2618
2619 /* The compiler may generate BL instructions to local labels because
2620 it needs to perform a branch to a far away location. These labels
2621 do not have a corresponding ".real_start_of" label. We check
2622 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2623 the ".real_start_of" convention for nonlocal branches. */
2624 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2625 return symbolP;
2626
2627 real_start = ACONCAT ((STUB_NAME, name, NULL));
2628 new_target = symbol_find (real_start);
2629
2630 if (new_target == NULL)
2631 {
2632 as_warn (_("Failed to find real start of function: %s\n"), name);
2633 new_target = symbolP;
2634 }
2635
2636 return new_target;
2637 }
2638 #endif
2639
/* Switch the assembler between Thumb (WIDTH == 16) and ARM
   (WIDTH == 32) instruction encoding, updating the global thumb_mode
   and recording/forcing alignment as needed.  Diagnoses processors
   that lack the requested instruction set.  */

static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* Coming from Thumb mode we may be only halfword aligned;
	     pad out to a word boundary for ARM code.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
2677
/* Implement the .arm directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2684
/* Implement the .thumb directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2691
2692 static void
2693 s_code (int unused ATTRIBUTE_UNUSED)
2694 {
2695 int temp;
2696
2697 temp = get_absolute_expression ();
2698 switch (temp)
2699 {
2700 case 16:
2701 case 32:
2702 opcode_select (temp);
2703 break;
2704
2705 default:
2706 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2707 }
2708 }
2709
/* Implement the .force_thumb directive: unconditionally switch to
   Thumb encoding without checking processor support.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2, not 1, so opcode_select's feature check is bypassed for
	 subsequent mode switches.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2726
/* Implement the .thumb_func directive: switch to Thumb encoding and
   arrange for the next label to be marked as a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2736
2737 /* Perform a .set directive, but also mark the alias as
2738 being a thumb function. */
2739
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Re-terminate NAME just for the error message, then restore.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Put the delimiter back before parsing the value expression.  */
  * end_name = delim;

  /* When EQUIV is non-zero, redefining an already-defined symbol is an
     error (cf. s_set in read.c).  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2826
2827 /* Directives: Mode selection. */
2828
2829 /* .syntax [unified|divided] - choose the new unified syntax
2830 (same for Arm and Thumb encoding, modulo slight differences in what
2831 can be represented) or the old divergent syntax for each mode. */
2832 static void
2833 s_syntax (int unused ATTRIBUTE_UNUSED)
2834 {
2835 char *name, delim;
2836
2837 name = input_line_pointer;
2838 delim = get_symbol_end ();
2839
2840 if (!strcasecmp (name, "unified"))
2841 unified_syntax = TRUE;
2842 else if (!strcasecmp (name, "divided"))
2843 unified_syntax = FALSE;
2844 else
2845 {
2846 as_bad (_("unrecognized syntax mode \"%s\""), name);
2847 return;
2848 }
2849 *input_line_pointer = delim;
2850 demand_empty_rest_of_line ();
2851 }
2852
2853 /* Directives: sectioning and alignment. */
2854
2855 /* Same as s_align_ptwo but align 0 => align 2. */
2856
2857 static void
2858 s_align (int unused ATTRIBUTE_UNUSED)
2859 {
2860 int temp;
2861 bfd_boolean fill_p;
2862 long temp_fill;
2863 long max_alignment = 15;
2864
2865 temp = get_absolute_expression ();
2866 if (temp > max_alignment)
2867 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2868 else if (temp < 0)
2869 {
2870 as_bad (_("alignment negative. 0 assumed."));
2871 temp = 0;
2872 }
2873
2874 if (*input_line_pointer == ',')
2875 {
2876 input_line_pointer++;
2877 temp_fill = get_absolute_expression ();
2878 fill_p = TRUE;
2879 }
2880 else
2881 {
2882 fill_p = FALSE;
2883 temp_fill = 0;
2884 }
2885
2886 if (!temp)
2887 temp = 2;
2888
2889 /* Only make a frag if we HAVE to. */
2890 if (temp && !need_pass_2)
2891 {
2892 if (!fill_p && subseg_text_p (now_seg))
2893 frag_align_code (temp, 0);
2894 else
2895 frag_align (temp, (int) temp_fill, 0);
2896 }
2897 demand_empty_rest_of_line ();
2898
2899 record_alignment (now_seg, temp);
2900 }
2901
/* Implement the .bss directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2914
/* Implement the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2926
2927 /* Directives: Literal pools. */
2928
2929 static literal_pool *
2930 find_literal_pool (void)
2931 {
2932 literal_pool * pool;
2933
2934 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2935 {
2936 if (pool->section == now_seg
2937 && pool->sub_section == now_subseg)
2938 break;
2939 }
2940
2941 return pool;
2942 }
2943
/* Return the literal pool for the current (sub)segment, creating one
   (and a fresh anchor symbol) if necessary.  Returns NULL only on
   allocation failure.  */

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = (literal_pool *) xmalloc (sizeof (* pool));
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  The symbol
     anchors PC-relative references until s_ltorg assigns it a real
     location.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
2981
2982 /* Add the literal in the global 'inst'
2983 structure to the relevant literal pool. */
2984
static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool, so identical
     constants/symbol references share a single slot.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Rewrite the instruction's relocation expression to reference the
     pool slot: pool symbol + 4 bytes per entry.  */
  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3034
3035 /* Can't use symbol_new here, so have to create a symbol and then at
3036 a later date assign it a value. Thats what these functions do. */
3037
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3085
3086
/* Implement the .ltorg directive: dump the current literal pool at
   this point in the output, word-aligned and marked as data.  Does
   nothing if there is no pool or it is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* The \002 byte makes the name impossible to enter from source, so
     it cannot clash with a user symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's anchor symbol its real location, resolving all
     earlier references into this pool.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  A later literal will lazily create a new
     anchor symbol (see find_or_make_literal_pool).  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3129
3130 #ifdef OBJ_ELF
3131 /* Forward declarations for functions below, in the MD interface
3132 section. */
3133 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3134 static valueT create_unwind_entry (int);
3135 static void start_unwind_section (const segT, int);
3136 static void add_unwind_opcode (valueT, int);
3137 static void flush_pending_unwind (void);
3138
3139 /* Directives: Data. */
3140
/* Implement .word/.short-style data directives emitting NBYTES per
   expression, with support for an ARM relocation-specifier suffix
   (e.g. "(got)") parsed by parse_reloc.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Whatever we emit here is data, not code.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the expression as ordinary data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  /* Shuffle the reloc suffix out of the input text so
		     the remaining expression can be re-parsed as one
		     unit, then restore the original text afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation at the least significant end
		     of the NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3230
3231 /* Emit an expression containing a 32-bit thumb instruction.
3232 Implementation based on put_thumb32_insn. */
3233
static void
emit_thumb32_expr (expressionS * exp)
{
  expressionS exp_high = *exp;

  /* A 32-bit Thumb instruction is stored as two 16-bit units, most
     significant halfword first, regardless of data endianness.  */
  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
  /* Then the low halfword.  Note that EXP is clobbered here.  */
  exp->X_add_number &= 0xffff;
  emit_expr (exp, (unsigned int) THUMB_SIZE);
}
3244
3245 /* Guess the instruction size based on the opcode. */
3246
/* Guess the size in bytes of a Thumb instruction from its OPCODE
   value: 2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when the
   value falls in the ambiguous range and no guess is possible.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;

  if (op >= 0xe8000000u)
    return 4;

  return 0;
}
3257
/* Emit one .inst operand EXP as an instruction of NBYTES (0 means
   guess the size from the opcode, Thumb only).  Updates the IT block
   state machine accordingly.  Returns TRUE iff something was
   emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: infer from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT state machine consistent with the emitted
		 instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb insns need halfword swapping on
		 little-endian targets; see emit_thumb32_expr.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3302
3303 /* Like s_arm_elf_cons but do not use md_cons_align and
3304 set the mapping state to MAP_ARM/MAP_THUMB. */
3305
static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* In ARM mode every instruction is 4 bytes, so .inst.n/.inst.w
	 width suffixes (NBYTES != 0) make no sense.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated expression as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3352
3353 /* Parse a .rel31 directive. */
3354
/* Parse a .rel31 directive: ".rel31 BIT, EXPR" emits a 32-bit word
   whose top bit is BIT (0 or 1) and whose low 31 bits hold a
   self-relative (PREL31) reference to EXPR.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the PREL31 fixup fills in the low 31 bits
     at write-out time.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3392
3393 /* Directives: AEABI stack-unwind tables. */
3394
3395 /* Parse an unwind_fnstart directive. Simply records the current location. */
3396
static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Nested/duplicate .fnstart without an intervening .fnend.  */
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3421
3422
3423 /* Parse a handlerdata directive. Creates the exception handling table entry
3424 for the function. */
3425
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* .handlerdata is only meaningful inside a .fnstart/.fnend pair.  */
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Force the table entry out now so the handler data that follows
     sits directly after it.  */
  create_unwind_entry (1);
}
3438
3439 /* Parse an unwind_fnend directive. Generates the index table entry. */
3440
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry, unless .handlerdata already created one.  A
     non-zero VAL is a compact unwind description to inline into the
     index table instead of a table reference.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized R_ARM_NONE fixup just records the reference.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3507
3508
3509 /* Parse an unwind_cantunwind directive. */
3510
3511 static void
3512 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3513 {
3514 demand_empty_rest_of_line ();
3515 if (!unwind.proc_start)
3516 as_bad (MISSING_FNSTART);
3517
3518 if (unwind.personality_routine || unwind.personality_index != -1)
3519 as_bad (_("personality routine specified for cantunwind frame"));
3520
3521 unwind.personality_index = -2;
3522 }
3523
3524
3525 /* Parse a personalityindex directive. */
3526
3527 static void
3528 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3529 {
3530 expressionS exp;
3531
3532 if (!unwind.proc_start)
3533 as_bad (MISSING_FNSTART);
3534
3535 if (unwind.personality_routine || unwind.personality_index != -1)
3536 as_bad (_("duplicate .personalityindex directive"));
3537
3538 expression (&exp);
3539
3540 if (exp.X_op != O_constant
3541 || exp.X_add_number < 0 || exp.X_add_number > 15)
3542 {
3543 as_bad (_("bad personality routine number"));
3544 ignore_rest_of_line ();
3545 return;
3546 }
3547
3548 unwind.personality_index = exp.X_add_number;
3549
3550 demand_empty_rest_of_line ();
3551 }
3552
3553
3554 /* Parse a personality directive. */
3555
3556 static void
3557 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3558 {
3559 char *name, *p, c;
3560
3561 if (!unwind.proc_start)
3562 as_bad (MISSING_FNSTART);
3563
3564 if (unwind.personality_routine || unwind.personality_index != -1)
3565 as_bad (_("duplicate .personality directive"));
3566
3567 name = input_line_pointer;
3568 c = get_symbol_end ();
3569 p = input_line_pointer;
3570 unwind.personality_routine = symbol_find_or_make (name);
3571 *p = c;
3572 demand_empty_rest_of_line ();
3573 }
3574
3575
/* Parse a directive saving core registers: .unwind_save {reglist}.
   RANGE is a bitmask with bit N set for each saved rN; emits pop
   opcodes for r4-r15 (short or long form) then r0-r3, and accounts
   for the stacked bytes in unwind.frame_size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  Each core register is 4 bytes.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3651
3652
3653 /* Parse a directive saving FPA registers. */
3654
3655 static void
3656 s_arm_unwind_save_fpa (int reg)
3657 {
3658 expressionS exp;
3659 int num_regs;
3660 valueT op;
3661
3662 /* Get Number of registers to transfer. */
3663 if (skip_past_comma (&input_line_pointer) != FAIL)
3664 expression (&exp);
3665 else
3666 exp.X_op = O_illegal;
3667
3668 if (exp.X_op != O_constant)
3669 {
3670 as_bad (_("expected , <constant>"));
3671 ignore_rest_of_line ();
3672 return;
3673 }
3674
3675 num_regs = exp.X_add_number;
3676
3677 if (num_regs < 1 || num_regs > 4)
3678 {
3679 as_bad (_("number of registers must be in the range [1:4]"));
3680 ignore_rest_of_line ();
3681 return;
3682 }
3683
3684 demand_empty_rest_of_line ();
3685
3686 if (reg == 4)
3687 {
3688 /* Short form. */
3689 op = 0xb4 | (num_regs - 1);
3690 add_unwind_opcode (op, 1);
3691 }
3692 else
3693 {
3694 /* Long form. */
3695 op = 0xc800 | (reg << 4) | (num_regs - 1);
3696 add_unwind_opcode (op, 2);
3697 }
3698 unwind.frame_size += num_regs * 12;
3699 }
3700
3701
/* Parse a directive saving VFP registers for ARMv6 and above:
   .vsave {dreglist}.  The list is split into a VFPv3 part (d16-d31)
   and a d0-d15 part, each emitted as its own FSTMD-style opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes a start register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
3750
3751
3752 /* Parse a directive saving VFP registers for pre-ARMv6. */
3753
3754 static void
3755 s_arm_unwind_save_vfp (void)
3756 {
3757 int count;
3758 unsigned int reg;
3759 valueT op;
3760
3761 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3762 if (count == FAIL)
3763 {
3764 as_bad (_("expected register list"));
3765 ignore_rest_of_line ();
3766 return;
3767 }
3768
3769 demand_empty_rest_of_line ();
3770
3771 if (reg == 8)
3772 {
3773 /* Short form. */
3774 op = 0xb8 | (count - 1);
3775 add_unwind_opcode (op, 1);
3776 }
3777 else
3778 {
3779 /* Long form. */
3780 op = 0xb300 | (reg << 4) | (count - 1);
3781 add_unwind_opcode (op, 2);
3782 }
3783 unwind.frame_size += count * 8 + 4;
3784 }
3785
3786
3787 /* Parse a directive saving iWMMXt data registers. */
3788
3789 static void
3790 s_arm_unwind_save_mmxwr (void)
3791 {
3792 int reg;
3793 int hi_reg;
3794 int i;
3795 unsigned mask = 0;
3796 valueT op;
3797
3798 if (*input_line_pointer == '{')
3799 input_line_pointer++;
3800
3801 do
3802 {
3803 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3804
3805 if (reg == FAIL)
3806 {
3807 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3808 goto error;
3809 }
3810
3811 if (mask >> reg)
3812 as_tsktsk (_("register list not in ascending order"));
3813 mask |= 1 << reg;
3814
3815 if (*input_line_pointer == '-')
3816 {
3817 input_line_pointer++;
3818 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3819 if (hi_reg == FAIL)
3820 {
3821 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3822 goto error;
3823 }
3824 else if (reg >= hi_reg)
3825 {
3826 as_bad (_("bad register range"));
3827 goto error;
3828 }
3829 for (; reg < hi_reg; reg++)
3830 mask |= 1 << reg;
3831 }
3832 }
3833 while (skip_past_comma (&input_line_pointer) != FAIL);
3834
3835 if (*input_line_pointer == '}')
3836 input_line_pointer++;
3837
3838 demand_empty_rest_of_line ();
3839
3840 /* Generate any deferred opcodes because we're going to be looking at
3841 the list. */
3842 flush_pending_unwind ();
3843
3844 for (i = 0; i < 16; i++)
3845 {
3846 if (mask & (1 << i))
3847 unwind.frame_size += 8;
3848 }
3849
3850 /* Attempt to combine with a previous opcode. We do this because gcc
3851 likes to output separate unwind directives for a single block of
3852 registers. */
3853 if (unwind.opcode_count > 0)
3854 {
3855 i = unwind.opcodes[unwind.opcode_count - 1];
3856 if ((i & 0xf8) == 0xc0)
3857 {
3858 i &= 7;
3859 /* Only merge if the blocks are contiguous. */
3860 if (i < 6)
3861 {
3862 if ((mask & 0xfe00) == (1 << 9))
3863 {
3864 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3865 unwind.opcode_count--;
3866 }
3867 }
3868 else if (i == 6 && unwind.opcode_count >= 2)
3869 {
3870 i = unwind.opcodes[unwind.opcode_count - 2];
3871 reg = i >> 4;
3872 i &= 0xf;
3873
3874 op = 0xffff << (reg - 1);
3875 if (reg > 0
3876 && ((mask & op) == (1u << (reg - 1))))
3877 {
3878 op = (1 << (reg + i + 1)) - 1;
3879 op &= ~((1 << reg) - 1);
3880 mask |= op;
3881 unwind.opcode_count -= 2;
3882 }
3883 }
3884 }
3885 }
3886
3887 hi_reg = 15;
3888 /* We want to generate opcodes in the order the registers have been
3889 saved, ie. descending order. */
3890 for (reg = 15; reg >= -1; reg--)
3891 {
3892 /* Save registers in blocks. */
3893 if (reg < 0
3894 || !(mask & (1 << reg)))
3895 {
3896 /* We found an unsaved reg. Generate opcodes to save the
3897 preceding block. */
3898 if (reg != hi_reg)
3899 {
3900 if (reg == 9)
3901 {
3902 /* Short form. */
3903 op = 0xc0 | (hi_reg - 10);
3904 add_unwind_opcode (op, 1);
3905 }
3906 else
3907 {
3908 /* Long form. */
3909 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3910 add_unwind_opcode (op, 2);
3911 }
3912 }
3913 hi_reg = reg - 1;
3914 }
3915 }
3916
3917 return;
3918 error:
3919 ignore_rest_of_line ();
3920 }
3921
3922 static void
3923 s_arm_unwind_save_mmxwcg (void)
3924 {
3925 int reg;
3926 int hi_reg;
3927 unsigned mask = 0;
3928 valueT op;
3929
3930 if (*input_line_pointer == '{')
3931 input_line_pointer++;
3932
3933 do
3934 {
3935 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3936
3937 if (reg == FAIL)
3938 {
3939 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3940 goto error;
3941 }
3942
3943 reg -= 8;
3944 if (mask >> reg)
3945 as_tsktsk (_("register list not in ascending order"));
3946 mask |= 1 << reg;
3947
3948 if (*input_line_pointer == '-')
3949 {
3950 input_line_pointer++;
3951 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3952 if (hi_reg == FAIL)
3953 {
3954 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3955 goto error;
3956 }
3957 else if (reg >= hi_reg)
3958 {
3959 as_bad (_("bad register range"));
3960 goto error;
3961 }
3962 for (; reg < hi_reg; reg++)
3963 mask |= 1 << reg;
3964 }
3965 }
3966 while (skip_past_comma (&input_line_pointer) != FAIL);
3967
3968 if (*input_line_pointer == '}')
3969 input_line_pointer++;
3970
3971 demand_empty_rest_of_line ();
3972
3973 /* Generate any deferred opcodes because we're going to be looking at
3974 the list. */
3975 flush_pending_unwind ();
3976
3977 for (reg = 0; reg < 16; reg++)
3978 {
3979 if (mask & (1 << reg))
3980 unwind.frame_size += 4;
3981 }
3982 op = 0xc700 | mask;
3983 add_unwind_opcode (op, 2);
3984 return;
3985 error:
3986 ignore_rest_of_line ();
3987 }
3988
3989
3990 /* Parse an unwind_save directive.
3991 If the argument is non-zero, this is a .vsave directive. */
3992
3993 static void
3994 s_arm_unwind_save (int arch_v6)
3995 {
3996 char *peek;
3997 struct reg_entry *reg;
3998 bfd_boolean had_brace = FALSE;
3999
4000 if (!unwind.proc_start)
4001 as_bad (MISSING_FNSTART);
4002
4003 /* Figure out what sort of save we have. */
4004 peek = input_line_pointer;
4005
4006 if (*peek == '{')
4007 {
4008 had_brace = TRUE;
4009 peek++;
4010 }
4011
4012 reg = arm_reg_parse_multi (&peek);
4013
4014 if (!reg)
4015 {
4016 as_bad (_("register expected"));
4017 ignore_rest_of_line ();
4018 return;
4019 }
4020
4021 switch (reg->type)
4022 {
4023 case REG_TYPE_FN:
4024 if (had_brace)
4025 {
4026 as_bad (_("FPA .unwind_save does not take a register list"));
4027 ignore_rest_of_line ();
4028 return;
4029 }
4030 input_line_pointer = peek;
4031 s_arm_unwind_save_fpa (reg->number);
4032 return;
4033
4034 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4035 case REG_TYPE_VFD:
4036 if (arch_v6)
4037 s_arm_unwind_save_vfp_armv6 ();
4038 else
4039 s_arm_unwind_save_vfp ();
4040 return;
4041 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4042 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4043
4044 default:
4045 as_bad (_(".unwind_save does not support this kind of register"));
4046 ignore_rest_of_line ();
4047 }
4048 }
4049
4050
4051 /* Parse an unwind_movsp directive. */
4052
4053 static void
4054 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4055 {
4056 int reg;
4057 valueT op;
4058 int offset;
4059
4060 if (!unwind.proc_start)
4061 as_bad (MISSING_FNSTART);
4062
4063 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4064 if (reg == FAIL)
4065 {
4066 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4067 ignore_rest_of_line ();
4068 return;
4069 }
4070
4071 /* Optional constant. */
4072 if (skip_past_comma (&input_line_pointer) != FAIL)
4073 {
4074 if (immediate_for_directive (&offset) == FAIL)
4075 return;
4076 }
4077 else
4078 offset = 0;
4079
4080 demand_empty_rest_of_line ();
4081
4082 if (reg == REG_SP || reg == REG_PC)
4083 {
4084 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4085 return;
4086 }
4087
4088 if (unwind.fp_reg != REG_SP)
4089 as_bad (_("unexpected .unwind_movsp directive"));
4090
4091 /* Generate opcode to restore the value. */
4092 op = 0x90 | reg;
4093 add_unwind_opcode (op, 1);
4094
4095 /* Record the information for later. */
4096 unwind.fp_reg = reg;
4097 unwind.fp_offset = unwind.frame_size - offset;
4098 unwind.sp_restored = 1;
4099 }
4100
4101 /* Parse an unwind_pad directive. */
4102
4103 static void
4104 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4105 {
4106 int offset;
4107
4108 if (!unwind.proc_start)
4109 as_bad (MISSING_FNSTART);
4110
4111 if (immediate_for_directive (&offset) == FAIL)
4112 return;
4113
4114 if (offset & 3)
4115 {
4116 as_bad (_("stack increment must be multiple of 4"));
4117 ignore_rest_of_line ();
4118 return;
4119 }
4120
4121 /* Don't generate any opcodes, just record the details for later. */
4122 unwind.frame_size += offset;
4123 unwind.pending_offset += offset;
4124
4125 demand_empty_rest_of_line ();
4126 }
4127
4128 /* Parse an unwind_setfp directive. */
4129
4130 static void
4131 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4132 {
4133 int sp_reg;
4134 int fp_reg;
4135 int offset;
4136
4137 if (!unwind.proc_start)
4138 as_bad (MISSING_FNSTART);
4139
4140 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4141 if (skip_past_comma (&input_line_pointer) == FAIL)
4142 sp_reg = FAIL;
4143 else
4144 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4145
4146 if (fp_reg == FAIL || sp_reg == FAIL)
4147 {
4148 as_bad (_("expected <reg>, <reg>"));
4149 ignore_rest_of_line ();
4150 return;
4151 }
4152
4153 /* Optional constant. */
4154 if (skip_past_comma (&input_line_pointer) != FAIL)
4155 {
4156 if (immediate_for_directive (&offset) == FAIL)
4157 return;
4158 }
4159 else
4160 offset = 0;
4161
4162 demand_empty_rest_of_line ();
4163
4164 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4165 {
4166 as_bad (_("register must be either sp or set by a previous"
4167 "unwind_movsp directive"));
4168 return;
4169 }
4170
4171 /* Don't generate any opcodes, just record the information for later. */
4172 unwind.fp_reg = fp_reg;
4173 unwind.fp_used = 1;
4174 if (sp_reg == REG_SP)
4175 unwind.fp_offset = unwind.frame_size - offset;
4176 else
4177 unwind.fp_offset -= offset;
4178 }
4179
4180 /* Parse an unwind_raw directive. */
4181
4182 static void
4183 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4184 {
4185 expressionS exp;
4186 /* This is an arbitrary limit. */
4187 unsigned char op[16];
4188 int count;
4189
4190 if (!unwind.proc_start)
4191 as_bad (MISSING_FNSTART);
4192
4193 expression (&exp);
4194 if (exp.X_op == O_constant
4195 && skip_past_comma (&input_line_pointer) != FAIL)
4196 {
4197 unwind.frame_size += exp.X_add_number;
4198 expression (&exp);
4199 }
4200 else
4201 exp.X_op = O_illegal;
4202
4203 if (exp.X_op != O_constant)
4204 {
4205 as_bad (_("expected <offset>, <opcode>"));
4206 ignore_rest_of_line ();
4207 return;
4208 }
4209
4210 count = 0;
4211
4212 /* Parse the opcode. */
4213 for (;;)
4214 {
4215 if (count >= 16)
4216 {
4217 as_bad (_("unwind opcode too long"));
4218 ignore_rest_of_line ();
4219 }
4220 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4221 {
4222 as_bad (_("invalid unwind opcode"));
4223 ignore_rest_of_line ();
4224 return;
4225 }
4226 op[count++] = exp.X_add_number;
4227
4228 /* Parse the next byte. */
4229 if (skip_past_comma (&input_line_pointer) == FAIL)
4230 break;
4231
4232 expression (&exp);
4233 }
4234
4235 /* Add the opcode bytes in reverse order. */
4236 while (count--)
4237 add_unwind_opcode (op[count], 1);
4238
4239 demand_empty_rest_of_line ();
4240 }
4241
4242
4243 /* Parse a .eabi_attribute directive. */
4244
4245 static void
4246 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4247 {
4248 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4249
4250 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4251 attributes_set_explicitly[tag] = 1;
4252 }
4253 #endif /* OBJ_ELF */
4254
4255 static void s_arm_arch (int);
4256 static void s_arm_object_arch (int);
4257 static void s_arm_cpu (int);
4258 static void s_arm_fpu (int);
4259
4260 #ifdef TE_PE
4261
4262 static void
4263 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4264 {
4265 expressionS exp;
4266
4267 do
4268 {
4269 expression (&exp);
4270 if (exp.X_op == O_symbol)
4271 exp.X_op = O_secrel;
4272
4273 emit_expr (&exp, 4);
4274 }
4275 while (*input_line_pointer++ == ',');
4276
4277 input_line_pointer--;
4278 demand_empty_rest_of_line ();
4279 }
4280 #endif /* TE_PE */
4281
4282 /* This table describes all the machine specific pseudo-ops the assembler
4283 has to support. The fields are:
4284 pseudo-op name without dot
4285 function to call to execute this pseudo-op
4286 Integer arg to pass to the function. */
4287
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  /* ELF-only directives: data with ARM-specific relocs, raw instruction
     emission, and the AEABI stack-unwinding table directives.  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
4352 \f
4353 /* Parser functions used exclusively in instruction operands. */
4354
4355 /* Generic immediate-value read function for use in insn parsing.
4356 STR points to the beginning of the immediate (the leading #);
4357 VAL receives the value; if the value is outside [MIN, MAX]
4358 issue an error. PREFIX_OPT is true if the immediate prefix is
4359 optional. */
4360
4361 static int
4362 parse_immediate (char **str, int *val, int min, int max,
4363 bfd_boolean prefix_opt)
4364 {
4365 expressionS exp;
4366 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4367 if (exp.X_op != O_constant)
4368 {
4369 inst.error = _("constant expression required");
4370 return FAIL;
4371 }
4372
4373 if (exp.X_add_number < min || exp.X_add_number > max)
4374 {
4375 inst.error = _("immediate value out of range");
4376 return FAIL;
4377 }
4378
4379 *val = exp.X_add_number;
4380 return SUCCESS;
4381 }
4382
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, the high 32 bits (if any) in .reg with .regisimm set.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* For O_big, X_add_number is the littlenum count of the value.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4434
/* Returns the pseudo-register number of an FPA immediate constant
   (8 + the index into fp_const/fp_values), or FAIL if there isn't a
   valid constant here.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not a full match after all; restore the input position.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Success: consume input and restore input_line_pointer.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Failure: restore input_line_pointer before reporting.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4525
/* Return 1 if IMM has the "quarter-precision" float bit pattern
   0baBbbbbbc defgh000 00000000 00000000, else 0.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned required_exp;

  /* Bits 30:25 must match one of two patterns, selected by bit 29.  */
  required_exp = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return ((imm & 0x7e000000) ^ required_exp) == 0;
}
4535
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success stores the 32-bit single
   precision pattern in *IMMED, advances *CCP, and returns SUCCESS.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan ahead for a character that proves this is a float.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a proper quarter-precision pattern, or (minus) zero.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4599
/* Shift operands.  The five ARM shift/rotate kinds; ASL is folded into
   LSL and RRX into ROR #0 during parsing.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its kind; entries live in the
   arm_shift_hsh hash table consulted by parse_shift.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
4621
4622 /* Parse a <shift> specifier on an ARM data processing instruction.
4623 This has three forms:
4624
4625 (LSL|LSR|ASL|ASR|ROR) Rs
4626 (LSL|LSR|ASL|ASR|ROR) #imm
4627 RRX
4628
4629 Note that ASL is assimilated to LSL in the instruction encoding, and
4630 RRX to ROR #0 (which cannot be written as such). */
4631
4632 static int
4633 parse_shift (char **str, int i, enum parse_shift_mode mode)
4634 {
4635 const struct asm_shift_name *shift_name;
4636 enum shift_kind shift;
4637 char *s = *str;
4638 char *p = s;
4639 int reg;
4640
4641 for (p = *str; ISALPHA (*p); p++)
4642 ;
4643
4644 if (p == *str)
4645 {
4646 inst.error = _("shift expression expected");
4647 return FAIL;
4648 }
4649
4650 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4651 p - *str);
4652
4653 if (shift_name == NULL)
4654 {
4655 inst.error = _("shift expression expected");
4656 return FAIL;
4657 }
4658
4659 shift = shift_name->kind;
4660
4661 switch (mode)
4662 {
4663 case NO_SHIFT_RESTRICT:
4664 case SHIFT_IMMEDIATE: break;
4665
4666 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4667 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4668 {
4669 inst.error = _("'LSL' or 'ASR' required");
4670 return FAIL;
4671 }
4672 break;
4673
4674 case SHIFT_LSL_IMMEDIATE:
4675 if (shift != SHIFT_LSL)
4676 {
4677 inst.error = _("'LSL' required");
4678 return FAIL;
4679 }
4680 break;
4681
4682 case SHIFT_ASR_IMMEDIATE:
4683 if (shift != SHIFT_ASR)
4684 {
4685 inst.error = _("'ASR' required");
4686 return FAIL;
4687 }
4688 break;
4689
4690 default: abort ();
4691 }
4692
4693 if (shift != SHIFT_RRX)
4694 {
4695 /* Whitespace can appear here if the next thing is a bare digit. */
4696 skip_whitespace (p);
4697
4698 if (mode == NO_SHIFT_RESTRICT
4699 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4700 {
4701 inst.operands[i].imm = reg;
4702 inst.operands[i].immisreg = 1;
4703 }
4704 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4705 return FAIL;
4706 }
4707 inst.operands[i].shift_kind = shift;
4708 inst.operands[i].shifted = 1;
4709 *str = p;
4710 return SUCCESS;
4711 }
4712
4713 /* Parse a <shifter_operand> for an ARM data processing instruction:
4714
4715 #<immediate>
4716 #<immediate>, <rotate>
4717 <Rm>
4718 <Rm>, <shift>
4719
4720 where <shift> is defined by parse_shift above, and <rotate> is a
4721 multiple of 2 between 0 and 30. Validation of immediate operands
4722 is deferred to md_apply_fix. */
4723
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> with an optional trailing <shift>.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: #<immediate> with an optional explicit rotation.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30] and the base
	 constant an 8-bit value, matching the ARM immediate encoding.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 (Rotate the 8-bit constant right by VALUE, modulo 32 bits.)  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4783
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as it may appear in assembler source
   (where it must be followed by a colon).
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */
4790
struct group_reloc_table_entry
{
  /* Relocation name as written in source (e.g. "pc_g0"), without the
     trailing colon.  */
  const char *name;
  /* BFD relocation code to use for each instruction class; a value of
     0 means the group relocation is not permitted for that class.  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};
4799
typedef enum
{
  /* Varieties of non-ALU group relocation.  */
  /* There is no GROUP_ALU value: ALU instructions always select the
     alu_code field directly (see parse_shifter_operand_group_reloc).  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
4808
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    /* A zero relocation code below means that class of instruction may
       not use the corresponding group relocation (parse_address_main
       diagnoses this).  */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
4862
4863 /* Given the address of a pointer pointing to the textual name of a group
4864 relocation as may appear in assembler source, attempt to find its details
4865 in group_reloc_table. The pointer will be updated to the character after
4866 the trailing colon. On failure, FAIL will be returned; SUCCESS
4867 otherwise. On success, *entry will be updated to point at the relevant
4868 group_reloc_table entry. */
4869
4870 static int
4871 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4872 {
4873 unsigned int i;
4874 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4875 {
4876 int length = strlen (group_reloc_table[i].name);
4877
4878 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4879 && (*str)[length] == ':')
4880 {
4881 *out = &group_reloc_table[i];
4882 *str += (length + 1);
4883 return SUCCESS;
4884 }
4885 }
4886
4887 return FAIL;
4888 }
4889
4890 /* Parse a <shifter_operand> for an ARM data processing instruction
4891 (as for parse_shifter_operand) where group relocations are allowed:
4892
4893 #<immediate>
4894 #<immediate>, <rotate>
4895 #:<group_reloc>:<expression>
4896 <Rm>
4897 <Rm>, <shift>
4898
4899 where <group_reloc> is one of the strings defined in group_reloc_table.
4900 The hashes are optional.
4901
4902 Everything else is as for parse_shifter_operand. */
4903
4904 static parse_operand_result
4905 parse_shifter_operand_group_reloc (char **str, int i)
4906 {
4907 /* Determine if we have the sequence of characters #: or just :
4908 coming next. If we do, then we check for a group relocation.
4909 If we don't, punt the whole lot to parse_shifter_operand. */
4910
4911 if (((*str)[0] == '#' && (*str)[1] == ':')
4912 || (*str)[0] == ':')
4913 {
4914 struct group_reloc_table_entry *entry;
4915
4916 if ((*str)[0] == '#')
4917 (*str) += 2;
4918 else
4919 (*str)++;
4920
4921 /* Try to parse a group relocation. Anything else is an error. */
4922 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4923 {
4924 inst.error = _("unknown group relocation");
4925 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4926 }
4927
4928 /* We now have the group relocation table entry corresponding to
4929 the name in the assembler source. Next, we parse the expression. */
4930 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4931 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4932
4933 /* Record the relocation type (always the ALU variant here). */
4934 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
4935 gas_assert (inst.reloc.type != 0);
4936
4937 return PARSE_OPERAND_SUCCESS;
4938 }
4939 else
4940 return parse_shifter_operand (str, i) == SUCCESS
4941 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4942
4943 /* Never reached. */
4944 }
4945
4946 /* Parse all forms of an ARM address expression. Information is written
4947 to inst.operands[i] and/or inst.reloc.
4948
4949 Preindexed addressing (.preind=1):
4950
4951 [Rn, #offset] .reg=Rn .reloc.exp=offset
4952 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4953 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4954 .shift_kind=shift .reloc.exp=shift_imm
4955
4956 These three may have a trailing ! which causes .writeback to be set also.
4957
4958 Postindexed addressing (.postind=1, .writeback=1):
4959
4960 [Rn], #offset .reg=Rn .reloc.exp=offset
4961 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4962 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4963 .shift_kind=shift .reloc.exp=shift_imm
4964
4965 Unindexed addressing (.preind=0, .postind=0):
4966
4967 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4968
4969 Other:
4970
4971 [Rn]{!} shorthand for [Rn,#0]{!}
4972 =immediate .isreg=0 .reloc.exp=immediate
4973 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4974
4975 It is the caller's responsibility to check for addressing modes not
4976 supported by the instruction, and to set inst.reloc.type. */
4977
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either a bare label (PC-relative) or an =immediate
     load-constant pseudo operand.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: the base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Anything after "[Rn," is a pre-index: register offset, alignment
     specifier, group relocation, or immediate offset.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* The '-' was not followed by a register, so back up one
		 character and let the expression parser see it as part
		 of a (negative) immediate.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in group_reloc_table means the relocation is
		 not available for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    /* Plain immediate offset: [Rn, #offset].  */
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed addressing: [Rn], #offset or [Rn], +/-Rm.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  /* '-' without a following register: rewind, as in the
		     pre-index case above.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5192
5193 static int
5194 parse_address (char **str, int i)
5195 {
5196 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5197 ? SUCCESS : FAIL;
5198 }
5199
5200 static parse_operand_result
5201 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5202 {
5203 return parse_address_main (str, i, 1, type);
5204 }
5205
5206 /* Parse an operand for a MOVW or MOVT instruction. */
5207 static int
5208 parse_half (char **str)
5209 {
5210 char * p;
5211
5212 p = *str;
5213 skip_past_char (&p, '#');
5214 if (strncasecmp (p, ":lower16:", 9) == 0)
5215 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5216 else if (strncasecmp (p, ":upper16:", 9) == 0)
5217 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5218
5219 if (inst.reloc.type != BFD_RELOC_UNUSED)
5220 {
5221 p += 9;
5222 skip_whitespace (p);
5223 }
5224
5225 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5226 return FAIL;
5227
5228 if (inst.reloc.type == BFD_RELOC_UNUSED)
5229 {
5230 if (inst.reloc.exp.X_op != O_constant)
5231 {
5232 inst.error = _("constant expression expected");
5233 return FAIL;
5234 }
5235 if (inst.reloc.exp.X_add_number < 0
5236 || inst.reloc.exp.X_add_number > 0xffff)
5237 {
5238 inst.error = _("immediate value out of range");
5239 return FAIL;
5240 }
5241 }
5242 *str = p;
5243 return SUCCESS;
5244 }
5245
5246 /* Miscellaneous. */
5247
5248 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5249 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Neither CPSR nor SPSR: try the v7-M special register names
	 (looked up whole, in arm_v7m_psr_hsh).  No suffix handling
	 applies in this case.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* Look up the suffix (e.g. the "cxsf" of "cpsr_cxsf") and OR its
	 field mask into the result.  */
      psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						  p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Bare [CS]PSR defaults to the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5313
5314 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5315 value suitable for splatting into the AIF field of the instruction. */
5316
5317 static int
5318 parse_cps_flags (char **str)
5319 {
5320 int val = 0;
5321 int saw_a_flag = 0;
5322 char *s = *str;
5323
5324 for (;;)
5325 switch (*s++)
5326 {
5327 case '\0': case ',':
5328 goto done;
5329
5330 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5331 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5332 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5333
5334 default:
5335 inst.error = _("unrecognized CPS flag");
5336 return FAIL;
5337 }
5338
5339 done:
5340 if (saw_a_flag == 0)
5341 {
5342 inst.error = _("missing CPS flags");
5343 return FAIL;
5344 }
5345
5346 *str = s - 1;
5347 return val;
5348 }
5349
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 1 for "be", 0 for "le", FAIL for an error.  */
5352
5353 static int
5354 parse_endian_specifier (char **str)
5355 {
5356 int little_endian;
5357 char *s = *str;
5358
5359 if (strncasecmp (s, "BE", 2))
5360 little_endian = 0;
5361 else if (strncasecmp (s, "LE", 2))
5362 little_endian = 1;
5363 else
5364 {
5365 inst.error = _("valid endian specifiers are be or le");
5366 return FAIL;
5367 }
5368
5369 if (ISALNUM (s[2]) || s[2] == '_')
5370 {
5371 inst.error = _("valid endian specifiers are be or le");
5372 return FAIL;
5373 }
5374
5375 *str = s + 2;
5376 return little_endian;
5377 }
5378
5379 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5380 value suitable for poking into the rotate field of an sxt or sxta
5381 instruction, or FAIL on error. */
5382
5383 static int
5384 parse_ror (char **str)
5385 {
5386 int rot;
5387 char *s = *str;
5388
5389 if (strncasecmp (s, "ROR", 3) == 0)
5390 s += 3;
5391 else
5392 {
5393 inst.error = _("missing rotation field after comma");
5394 return FAIL;
5395 }
5396
5397 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5398 return FAIL;
5399
5400 switch (rot)
5401 {
5402 case 0: *str = s; return 0x0;
5403 case 8: *str = s; return 0x1;
5404 case 16: *str = s; return 0x2;
5405 case 24: *str = s; return 0x3;
5406
5407 default:
5408 inst.error = _("rotation can only be 0, 8, 16, or 24");
5409 return FAIL;
5410 }
5411 }
5412
5413 /* Parse a conditional code (from conds[] below). The value returned is in the
5414 range 0 .. 14, or FAIL. */
5415 static int
5416 parse_cond (char **str)
5417 {
5418 char *q;
5419 const struct asm_cond *c;
5420 int n;
5421 /* Condition codes are always 2 characters, so matching up to
5422 3 characters is sufficient. */
5423 char cond[3];
5424
5425 q = *str;
5426 n = 0;
5427 while (ISALPHA (*q) && n < 3)
5428 {
5429 cond[n] = TOLOWER (*q);
5430 q++;
5431 n++;
5432 }
5433
5434 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5435 if (!c)
5436 {
5437 inst.error = _("condition required");
5438 return FAIL;
5439 }
5440
5441 *str = q;
5442 return c->value;
5443 }
5444
5445 /* Parse an option for a barrier instruction. Returns the encoding for the
5446 option, or FAIL. */
5447 static int
5448 parse_barrier (char **str)
5449 {
5450 char *p, *q;
5451 const struct asm_barrier_opt *o;
5452
5453 p = q = *str;
5454 while (ISALPHA (*q))
5455 q++;
5456
5457 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5458 q - p);
5459 if (!o)
5460 return FAIL;
5461
5462 *str = q;
5463 return o->value;
5464 }
5465
5466 /* Parse the operands of a table branch instruction. Similar to a memory
5467 operand. */
5468 static int
5469 parse_tb (char **str)
5470 {
5471 char * p = *str;
5472 int reg;
5473
5474 if (skip_past_char (&p, '[') == FAIL)
5475 {
5476 inst.error = _("'[' expected");
5477 return FAIL;
5478 }
5479
5480 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5481 {
5482 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5483 return FAIL;
5484 }
5485 inst.operands[0].reg = reg;
5486
5487 if (skip_past_comma (&p) == FAIL)
5488 {
5489 inst.error = _("',' expected");
5490 return FAIL;
5491 }
5492
5493 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5494 {
5495 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5496 return FAIL;
5497 }
5498 inst.operands[0].imm = reg;
5499
5500 if (skip_past_comma (&p) == SUCCESS)
5501 {
5502 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5503 return FAIL;
5504 if (inst.reloc.exp.X_add_number != 1)
5505 {
5506 inst.error = _("invalid shift");
5507 return FAIL;
5508 }
5509 inst.operands[0].shifted = 1;
5510 }
5511
5512 if (skip_past_char (&p, ']') == FAIL)
5513 {
5514 inst.error = _("']' expected");
5515 return FAIL;
5516 }
5517 *str = p;
5518 return SUCCESS;
5519 }
5520
5521 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5522 information on the types the operands can take and how they are encoded.
5523 Up to four operands may be read; this function handles setting the
5524 ".present" field for each read operand itself.
5525 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5526 else returns FAIL. */
5527
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The case numbers in the comments below refer to the VMOV forms
     enumerated in do_neon_mov.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5742
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the
   low 16 bits of the combined value and the Thumb matcher code the
   high 16 bits.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
5747
5748 /* Matcher codes for parse_operands. */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  /* Immediates whose prefix ('#') is mandatory.  */
  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		 0 .. 15 */
  OP_I16,	/*		 1 .. 16 */
  OP_I16z,	/*		 0 .. 16 */
  OP_I31,	/*		 0 .. 31 */
  OP_I31w,	/*		 0 .. 31, optional trailing ! */
  OP_I32,	/*		 1 .. 32 */
  OP_I32z,	/*		 0 .. 32 */
  OP_I63,	/*		 0 .. 63 */
  OP_I63s,	/*	       -64 .. 63 */
  OP_I64,	/*		 1 .. 64 */
  OP_I64z,	/*		 0 .. 64 */
  OP_I255,	/*		 0 .. 255 */

  /* Immediates whose prefix ('#') is optional.  */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All OP_o* codes must sort after this marker: code elsewhere relies
     on ">= OP_FIRST_OPTIONAL" to detect an optional operand.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
5874
5875 /* Generic instruction operand parser. This does no encoding and no
5876 semantic validation; it merely squirrels values away in the inst
5877 structure. Returns SUCCESS or FAIL depending on whether the
5878 specified grammar matched. */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;

  /* Helper macros.  Each one either consumes a piece of the input and
     records it in inst.operands[i], or jumps to the failure (or
     bad_args) code at the bottom of the main loop.  They are #undef'd
     again immediately after this function.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* A mixed ARM/Thumb operand code packs the Thumb variant in the
	 upper halfword (see MIX_ARM_THUMB_OPERANDS); pick the half
	 that matches the current instruction set.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* Operands are comma-separated.  Operand 0 may be an optional
	 operand that turned out to be absent, in which case no comma
	 precedes operand 1.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	  /* Neon scalar.  Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  /* Reuses the try_imm0 label of the previous case; a goto into
	     another case's statement block is valid C.  */
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  /* A register in brackets, e.g. "[r0]".  */
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	  /* Immediates with an optional '#' prefix.  */
	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the parser consumed everything up to the zapped '!',
	       step over the NUL so later checks see the full operand.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag; it becomes 16 on a
		 repeated or unknown flag letter, and must end up as 15
		 (all four of n, z, c, v seen exactly once).  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): the '^' suffix sets writeback on operand 1
		 unconditionally, not operand i — presumably because
		 OP_REGLST is always operand 1 of LDM/STM patterns;
		 confirm against the opcode table.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP)
		inst.error = BAD_SP;
	    }
	  break;

	  /* These cases stored their result in VAL rather than in the
	     operand structure; move it there now (or fail).  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
6499
/* Retire the operand-parsing helper macros defined inside
   parse_operands.  The old list undefined a nonexistent
   po_scalar_or_fail (the macro is named po_scalar_or_goto) and leaked
   po_misc_or_fail and po_misc_or_fail_no_backtrack.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
6505
/* Shorthand macro for instruction encoding functions issuing errors.
   Note: on a true EXPR this returns from the *calling* function, so it
   may only be used in void functions.  ERR is evaluated only when the
   constraint fires.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
6517
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Like `constraint', this returns from the calling function on a bad
   register.  REG is evaluated more than once; pass a simple value.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
6529
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is suppressed unless the user enabled
   deprecation warnings (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_warn (_("use of r13 is deprecated"));	\
  while (0)
6537
6538 /* Functions for operand encoding. ARM, then Thumb. */
6539
/* Rotate a 32-bit value V left by N bits.  Both shift counts are
   masked to 0..31 so that N == 0 (or N == 32) does not produce a
   shift by the full width of the type, which is undefined behavior
   in C.  N is evaluated twice; pass a side-effect-free expression.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
6541
6542 /* If VAL can be encoded in the immediate field of an ARM instruction,
6543 return the encoded form. Otherwise, return FAIL. */
6544
6545 static unsigned int
6546 encode_arm_immediate (unsigned int val)
6547 {
6548 unsigned int a, i;
6549
6550 for (i = 0; i < 32; i += 2)
6551 if ((a = rotate_left (val, i)) <= 0xff)
6552 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6553
6554 return FAIL;
6555 }
6556
6557 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6558 return the encoded form. Otherwise, return FAIL. */
6559 static unsigned int
6560 encode_thumb32_immediate (unsigned int val)
6561 {
6562 unsigned int a, i;
6563
6564 if (val <= 0xff)
6565 return val;
6566
6567 for (i = 1; i <= 24; i++)
6568 {
6569 a = val >> i;
6570 if ((val & ~(0xff << i)) == 0)
6571 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6572 }
6573
6574 a = val & 0xff;
6575 if (val == ((a << 16) | a))
6576 return 0x100 | a;
6577 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6578 return 0x300 | a;
6579
6580 a = val & 0xff00;
6581 if (val == ((a << 16) | a))
6582 return 0x200 | (a >> 8);
6583
6584 return FAIL;
6585 }
6586 /* Encode a VFP SP or DP register number into inst.instruction. */
6587
static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 only exist with the VFPv3 D32 extension; either record
     that the extension was used, or reject the register.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Single-precision registers split as [4:1]=field, [0]=extension bit;
     double-precision registers split as [3:0]=field, [4]=extension bit.
     Each position below ORs both parts into the right slots.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
6640
6641 /* Encode a <shift> in an ARM-format instruction. The immediate,
6642 if any, is handled by md_apply_fix. */
6643 static void
6644 encode_arm_shift (int i)
6645 {
6646 if (inst.operands[i].shift_kind == SHIFT_RRX)
6647 inst.instruction |= SHIFT_ROR << 5;
6648 else
6649 {
6650 inst.instruction |= inst.operands[i].shift_kind << 5;
6651 if (inst.operands[i].immisreg)
6652 {
6653 inst.instruction |= SHIFT_BY_REG;
6654 inst.instruction |= inst.operands[i].imm << 8;
6655 }
6656 else
6657 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6658 }
6659 }
6660
6661 static void
6662 encode_arm_shifter_operand (int i)
6663 {
6664 if (inst.operands[i].isreg)
6665 {
6666 inst.instruction |= inst.operands[i].reg;
6667 encode_arm_shift (i);
6668 }
6669 else
6670 inst.instruction |= INST_IMMEDIATE;
6671 }
6672
6673 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* The base register always goes in bits 16-19.  */
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  /* T-form (user-mode) loads/stores are post-indexed only.  */
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* Post-indexing implies writeback; for T-forms the W bit instead
	 selects the user-mode (translated) access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register coincides with a written-back
     base (Rn == Rd/Rt with writeback, or any post-indexed form).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6711
6712 /* inst.operands[i] was set up by parse_address. Encode it into an
6713 ARM-format mode 2 load or store instruction. If is_t is true,
6714 reject forms that cannot be used with a T instruction (i.e. not
6715 post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* The shift amount is applied by md_apply_fix.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
	  /* BAD_PC_ADDRESSING Condition =
	       is_load => is_t
	     which becomes !is_load || is_t.  */
	  constraint ((!is_load || is_t),
		      BAD_PC_ADDRESSING);
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
6759
6760 /* inst.operands[i] was set up by parse_address. Encode it into an
6761 ARM-format mode 3 load or store instruction. Reject forms that
6762 cannot be used with such instructions. If is_t is true, reject
6763 forms that cannot be used with a T instruction (i.e. not
6764 post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset: Rm in bits 0-3, neither Rm nor Rn may be PC.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || inst.operands[i].reg == REG_PC),
		  BAD_PC_ADDRESSING);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Mode 3 splits the 8-bit immediate across two nibbles; mark the
	 instruction as the immediate form and let the fixup fill it in.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
6795
6796 /* inst.operands[i] was set up by parse_address. Encode it into an
6797 ARM-format instruction. Reject all forms which cannot be encoded
6798 into a coprocessor load/store instruction. If wb_ok is false,
6799 reject use of writeback; if unind_ok is false, reject use of
6800 unindexed addressing. If reloc_override is not 0, use it instead
6801 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6802 (in which case it is preserved). */
6803
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* Base register in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit option field goes in the offset
	 slot, with the U bit set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the offset relocation: an explicit override wins, otherwise
     keep a group relocation already chosen by the parser, otherwise
     fall back to the generic coprocessor-offset relocation.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6856
6857 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6858 Determine whether it can be performed with a move instruction; if
6859 it can, convert inst.instruction to that move instruction and
6860 return TRUE; if it can't, convert inst.instruction to a literal-pool
6861 load and return FALSE. If this is not a valid thing to do in the
6862 current context, set inst.error and return TRUE.
6863
6864 inst.operands[i] describes the destination register. */
6865
static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit for the current encoding width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on loads.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  /* For a known constant, try to rewrite the load as a MOV (or MVN)
     so no literal pool entry is needed.  */
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* No MOV form worked: emit a literal pool entry and turn the
     instruction into a PC-relative load from the pool.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
6938
6939 /* Functions for instruction encoding, sorted by sub-architecture.
6940 First some generics; their names are taken from the conventional
6941 bit positions for register arguments in ARM format instructions. */
6942
/* Encoder for instructions with no operands: the opcode table entry
   already contains the complete encoding.  */

static void
do_noargs (void)
{
}
6947
6948 static void
6949 do_rd (void)
6950 {
6951 inst.instruction |= inst.operands[0].reg << 12;
6952 }
6953
6954 static void
6955 do_rd_rm (void)
6956 {
6957 inst.instruction |= inst.operands[0].reg << 12;
6958 inst.instruction |= inst.operands[1].reg;
6959 }
6960
6961 static void
6962 do_rd_rn (void)
6963 {
6964 inst.instruction |= inst.operands[0].reg << 12;
6965 inst.instruction |= inst.operands[1].reg << 16;
6966 }
6967
6968 static void
6969 do_rn_rd (void)
6970 {
6971 inst.instruction |= inst.operands[0].reg << 16;
6972 inst.instruction |= inst.operands[1].reg << 12;
6973 }
6974
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with extra checks when
   the opcode is SWP/SWPB.  */

static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
      if (warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for this architecture"));

    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
6995
6996 static void
6997 do_rd_rn_rm (void)
6998 {
6999 inst.instruction |= inst.operands[0].reg << 12;
7000 inst.instruction |= inst.operands[1].reg << 16;
7001 inst.instruction |= inst.operands[2].reg;
7002 }
7003
/* Encode operand 0 as Rm (3:0), operand 1 as Rd (15:12), operand 2 as
   Rn (19:16).  Operand 2 must not be PC and must carry no offset.  */

static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any non-zero (or genuinely symbolic) offset on the address operand
     is rejected; O_illegal means no expression was parsed at all.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7016
7017 static void
7018 do_imm0 (void)
7019 {
7020 inst.instruction |= inst.operands[0].imm;
7021 }
7022
7023 static void
7024 do_rd_cpaddr (void)
7025 {
7026 inst.instruction |= inst.operands[0].reg << 12;
7027 encode_arm_cp_address (1, TRUE, TRUE, 0);
7028 }
7029
7030 /* ARM instructions, in alphabetical order by function name (except
7031 that wrapper functions appear immediately after the function they
7032 wrap). */
7033
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
7048
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
7066
7067 static void
7068 do_arit (void)
7069 {
7070 if (!inst.operands[1].present)
7071 inst.operands[1].reg = inst.operands[0].reg;
7072 inst.instruction |= inst.operands[0].reg << 12;
7073 inst.instruction |= inst.operands[1].reg << 16;
7074 encode_arm_shifter_operand (2);
7075 }
7076
/* Barrier instructions: encode the optional barrier-type operand,
   defaulting to 0xf (SY) when it is omitted.  */

static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      /* Only the opcode variant with bits 7:4 == 4 accepts types other
	 than 0xf.  NOTE(review): presumably that variant is ISB --
	 confirm against the opcode table.  */
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
7090
7091 static void
7092 do_bfc (void)
7093 {
7094 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7095 constraint (msb > 32, _("bit-field extends past end of register"));
7096 /* The instruction encoding stores the LSB and MSB,
7097 not the LSB and width. */
7098 inst.instruction |= inst.operands[0].reg << 12;
7099 inst.instruction |= inst.operands[1].imm << 7;
7100 inst.instruction |= (msb - 1) << 16;
7101 }
7102
/* BFI: bit-field insert.  Operands are Rd, Rm, #lsb, #width.  */

static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
7122
7123 static void
7124 do_bfx (void)
7125 {
7126 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7127 _("bit-field extends past end of register"));
7128 inst.instruction |= inst.operands[0].reg << 12;
7129 inst.instruction |= inst.operands[1].reg;
7130 inst.instruction |= inst.operands[2].imm << 7;
7131 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7132 }
7133
7134 /* ARM V5 breakpoint instruction (argument parse)
7135 BKPT <16 bit unsigned immediate>
7136 Instruction is not conditional.
7137 The bit pattern given in insns[] has the COND_ALWAYS condition,
7138 and it is an error if the caller tried to override that. */
7139
7140 static void
7141 do_bkpt (void)
7142 {
7143 /* Top 12 of 16 bits to bits 19:8. */
7144 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7145
7146 /* Bottom 4 of 16 bits to bits 3:0. */
7147 inst.instruction |= inst.operands[0].imm & 0xf;
7148 }
7149
/* Set up the relocation for a branch instruction: honour an explicit
   "(plt)" suffix on the target, otherwise use DEFAULT_RELOC.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
    }
  inst.reloc.pc_rel = 1;
}
7165
/* B{cond}: for EABI v4+ objects use the JUMP reloc so the linker may
   rewrite the branch; otherwise use the plain PC-relative branch reloc.  */

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7176
/* BL{cond}: for EABI v4+, an unconditional BL gets the CALL reloc
   (which the linker may turn into BLX); a conditional one gets JUMP.  */

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7192
/* ARM V5 branch-link-exchange instruction (argument parse)
   BLX <target_addr>	ie BLX(1)
   BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) immediate form: the fixed encoding for the address
	 variant.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
7224
/* BX <Rm>.  May emit an R_ARM_V4BX marker reloc so the linker can
   rewrite BX for pre-v4T targets.  */

static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Note: when OBJ_ELF is not defined, the assignment below is
     unconditional -- the V4BX reloc is only ever emitted for ELF
     EABI v4+ objects.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
7248
7249
7250 /* ARM v5TEJ. Jump to Jazelle code. */
7251
7252 static void
7253 do_bxj (void)
7254 {
7255 if (inst.operands[0].reg == REG_PC)
7256 as_tsktsk (_("use of r15 in bxj is not really useful"));
7257
7258 inst.instruction |= inst.operands[0].reg;
7259 }
7260
7261 /* Co-processor data operation:
7262 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7263 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7264 static void
7265 do_cdp (void)
7266 {
7267 inst.instruction |= inst.operands[0].reg << 8;
7268 inst.instruction |= inst.operands[1].imm << 20;
7269 inst.instruction |= inst.operands[2].reg << 12;
7270 inst.instruction |= inst.operands[3].reg << 16;
7271 inst.instruction |= inst.operands[4].reg;
7272 inst.instruction |= inst.operands[5].imm << 5;
7273 }
7274
7275 static void
7276 do_cmp (void)
7277 {
7278 inst.instruction |= inst.operands[0].reg << 16;
7279 encode_arm_shifter_operand (1);
7280 }
7281
/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

static void
do_co_reg (void)
{
  unsigned Rd;

  Rd = inst.operands[2].reg;
  /* Register restrictions differ between the Thumb and ARM encodings,
     and between the MCR and MRC directions.  */
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }


  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
7321
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* The Thumb encoding forbids SP as well as PC for both registers.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
7360
7361 static void
7362 do_cpsi (void)
7363 {
7364 inst.instruction |= inst.operands[0].imm << 6;
7365 if (inst.operands[1].present)
7366 {
7367 inst.instruction |= CPSI_MMOD;
7368 inst.instruction |= inst.operands[1].imm;
7369 }
7370 }
7371
7372 static void
7373 do_dbg (void)
7374 {
7375 inst.instruction |= inst.operands[0].imm;
7376 }
7377
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* No bytes are emitted for IT in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the condition mask and base condition so the following
	 instructions can be checked against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
7394
/* LDM/STM family: encode base register, register list, writeback and
   the user-bank/exception-return (type 2/3) variants, and warn about
   the architecturally UNPREDICTABLE writeback combinations.  */

static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' on the register list selects the type 2/3 encoding.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
7433
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

   LDRccD R, mode
   STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* Rd == r14 would make the pair r14:r15, i.e. include the PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register of the pair may be left implicit.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
7476
/* LDREX: only the plain [Rn] addressing form (zero offset) is valid.  */

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): the PC check below looks redundant with the
     REG_PC test folded into the constraint above -- confirm whether
     one of them can be removed.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
7508
/* LDREXD: exclusive load of an even/odd register pair from [Rn].  */

static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7524
/* LDR/STR word and byte forms.  A non-register second operand is the
   "=expr" pseudo, which move_or_literal_pool may turn into a MOV/MVN
   or a literal-pool load.  */

static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}
7534
/* LDRT/STRT (user-mode translation) for addressing mode 2.  */

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
7553
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A non-register second operand is the "=expr" pseudo; mode_3
     selects the halfword literal reloc in the fallback path.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
7566
/* LDRSBT/LDRHT/etc. (user-mode translation) for addressing mode 3.  */

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
7585
7586 /* Co-processor register load/store.
7587 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7588 static void
7589 do_lstc (void)
7590 {
7591 inst.instruction |= inst.operands[0].reg << 8;
7592 inst.instruction |= inst.operands[1].reg << 12;
7593 encode_arm_cp_address (2, TRUE, TRUE, 0);
7594 }
7595
/* MLA/MLS: multiply-accumulate.  Rd and Rm should differ before v6.  */

static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
7610
7611 static void
7612 do_mov (void)
7613 {
7614 inst.instruction |= inst.operands[0].reg << 12;
7615 encode_arm_shifter_operand (1);
7616 }
7617
7618 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7619 static void
7620 do_mov16 (void)
7621 {
7622 bfd_vma imm;
7623 bfd_boolean top;
7624
7625 top = (inst.instruction & 0x00400000) != 0;
7626 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7627 _(":lower16: not allowed this instruction"));
7628 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7629 _(":upper16: not allowed instruction"));
7630 inst.instruction |= inst.operands[0].reg << 12;
7631 if (inst.reloc.type == BFD_RELOC_UNUSED)
7632 {
7633 imm = inst.reloc.exp.X_add_number;
7634 /* The value is in two pieces: 0:11, 16:19. */
7635 inst.instruction |= (imm & 0x00000fff);
7636 inst.instruction |= (imm & 0x0000f000) << 4;
7637 }
7638 }
7639
7640 static void do_vfp_nsyn_opcode (const char *);
7641
/* Handle the VFP non-unified-syntax forms of MRS.  Returns SUCCESS if
   the instruction was dispatched to a VFP opcode, FAIL if the caller
   should fall through to the core-register encoding.  */

static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* "APSR_nzcv, FPSCR" is FMSTAT, which takes no operands; clear
	 ours before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
7660
7661 static int
7662 do_vfp_nsyn_msr (void)
7663 {
7664 if (inst.operands[0].isvec)
7665 do_vfp_nsyn_opcode ("fmxr");
7666 else
7667 return FAIL;
7668
7669 return SUCCESS;
7670 }
7671
/* VMRS: read FPSCR into an ARM core register.  */

static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && inst.operands[0].reg == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  if (inst.operands[1].reg != 1)
    first_error (_("operand 1 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7695
/* VMSR: write an ARM core register to FPSCR.  */

static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb forbids SP and PC; ARM forbids only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  if (inst.operands[0].reg != 1)
    first_error (_("operand 0 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7714
/* MRS: read CPSR or SPSR into Rd.  VFP system registers are handled
   first via the non-unified-syntax path.  */

static void
do_mrs (void)
{
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}
7729
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The parsed PSR/field mask is already in encoding position.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to be resolved as an ARM
	 data-processing immediate fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
7750
/* MUL: Rd, Rm{, Rs}.  With two operands, Rs defaults to Rd.  */

static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
7766
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
7791
/* NOP, optionally with a hint operand.  On v6K and later (or whenever
   an operand is given) emit the architectural hint encoding; otherwise
   keep the traditional "mov r0, r0" opcode from the opcode table.  */

static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
7805
7806 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7807 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7808 Condition defaults to COND_ALWAYS.
7809 Error if Rd, Rn or Rm are R15. */
7810
7811 static void
7812 do_pkhbt (void)
7813 {
7814 inst.instruction |= inst.operands[0].reg << 12;
7815 inst.instruction |= inst.operands[1].reg << 16;
7816 inst.instruction |= inst.operands[2].reg;
7817 if (inst.operands[3].present)
7818 encode_arm_shift (3);
7819 }
7820
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;	/* Clear opcode bits and fields.  */
      inst.instruction |= inst.operands[0].reg << 12;
      /* Note the swapped operand positions relative to the PKHTB form.  */
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
7843
/* ARMv5TE: Preload-Cache

    PLD <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only plain pre-indexed addressing is architecturally valid.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
7863
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* Only plain pre-indexed addressing is architecturally valid.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding has the P bit clear, unlike PLD's.  */
  inst.instruction &= ~PRE_INDEX;
}
7879
/* PUSH/POP: rewrite as LDM/STM with SP! as the (synthesized) base
   operand, then reuse the LDM/STM encoder.  */

static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}
7890
7891 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7892 word at the specified address and the following word
7893 respectively.
7894 Unconditionally executed.
7895 Error if Rn is R15. */
7896
7897 static void
7898 do_rfe (void)
7899 {
7900 inst.instruction |= inst.operands[0].reg << 16;
7901 if (inst.operands[0].writeback)
7902 inst.instruction |= WRITE_BACK;
7903 }
7904
7905 /* ARM V6 ssat (argument parse). */
7906
7907 static void
7908 do_ssat (void)
7909 {
7910 inst.instruction |= inst.operands[0].reg << 12;
7911 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7912 inst.instruction |= inst.operands[2].reg;
7913
7914 if (inst.operands[3].present)
7915 encode_arm_shift (3);
7916 }
7917
7918 /* ARM V6 usat (argument parse). */
7919
7920 static void
7921 do_usat (void)
7922 {
7923 inst.instruction |= inst.operands[0].reg << 12;
7924 inst.instruction |= inst.operands[1].imm << 16;
7925 inst.instruction |= inst.operands[2].reg;
7926
7927 if (inst.operands[3].present)
7928 encode_arm_shift (3);
7929 }
7930
7931 /* ARM V6 ssat16 (argument parse). */
7932
7933 static void
7934 do_ssat16 (void)
7935 {
7936 inst.instruction |= inst.operands[0].reg << 12;
7937 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7938 inst.instruction |= inst.operands[2].reg;
7939 }
7940
7941 static void
7942 do_usat16 (void)
7943 {
7944 inst.instruction |= inst.operands[0].reg << 12;
7945 inst.instruction |= inst.operands[1].imm << 16;
7946 inst.instruction |= inst.operands[2].reg;
7947 }
7948
7949 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7950 preserving the other bits.
7951
7952 setend <endian_specifier>, where <endian_specifier> is either
7953 BE or LE. */
7954
7955 static void
7956 do_setend (void)
7957 {
7958 if (inst.operands[0].imm)
7959 inst.instruction |= 0x200;
7960 }
7961
/* Shift pseudo-instructions (lsl/lsr/asr/ror): Rd, {Rm,} #imm|Rs.
   With two operands, Rm defaults to Rd.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate shift amount: resolved later via a shift-imm fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
7979
7980 static void
7981 do_smc (void)
7982 {
7983 inst.reloc.type = BFD_RELOC_ARM_SMC;
7984 inst.reloc.pc_rel = 0;
7985 }
7986
7987 static void
7988 do_swi (void)
7989 {
7990 inst.reloc.type = BFD_RELOC_ARM_SWI;
7991 inst.reloc.pc_rel = 0;
7992 }
7993
7994 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7995 SMLAxy{cond} Rd,Rm,Rs,Rn
7996 SMLAWy{cond} Rd,Rm,Rs,Rn
7997 Error if any register is R15. */
7998
7999 static void
8000 do_smla (void)
8001 {
8002 inst.instruction |= inst.operands[0].reg << 16;
8003 inst.instruction |= inst.operands[1].reg;
8004 inst.instruction |= inst.operands[2].reg << 8;
8005 inst.instruction |= inst.operands[3].reg << 12;
8006 }
8007
8008 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8009 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8010 Error if any register is R15.
8011 Warning if Rdlo == Rdhi. */
8012
8013 static void
8014 do_smlal (void)
8015 {
8016 inst.instruction |= inst.operands[0].reg << 12;
8017 inst.instruction |= inst.operands[1].reg << 16;
8018 inst.instruction |= inst.operands[2].reg;
8019 inst.instruction |= inst.operands[3].reg << 8;
8020
8021 if (inst.operands[0].reg == inst.operands[1].reg)
8022 as_tsktsk (_("rdhi and rdlo must be different"));
8023 }
8024
8025 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8026 SMULxy{cond} Rd,Rm,Rs
8027 Error if any register is R15. */
8028
8029 static void
8030 do_smul (void)
8031 {
8032 inst.instruction |= inst.operands[0].reg << 16;
8033 inst.instruction |= inst.operands[1].reg;
8034 inst.instruction |= inst.operands[2].reg << 8;
8035 }
8036
8037 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8038 the same for both ARM and Thumb-2. */
8039
static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; when written
     explicitly it must still be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  /* Low bits hold the immediate (mode) operand.  */
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
8058
8059 /* ARM V6 strex (argument parse). */
8060
/* ARM STREX: Rd, Rt, [Rn].  The address must be a bare [Rn] (an
   explicit zero offset is tolerated).  */
static void
do_strex (void)
{
  /* No index register, post-indexing, writeback, shift or negative
     offset is permitted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the stored register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The zero offset was validated above, so no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8084
/* ARM STREXD: Rd, Rt, {Rt2,} [Rn].  Rt must be even; Rt2, when given,
   must be Rt+1.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register may overlap neither half of the stored pair
     nor the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
8106
8107 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8108 extends it to 32-bits, and adds the result to a value in another
8109 register. You can specify a rotation by 0, 8, 16, or 24 bits
8110 before extracting the 16-bit value.
8111 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8112 Condition defaults to COND_ALWAYS.
8113 Error if any register uses R15. */
8114
8115 static void
8116 do_sxtah (void)
8117 {
8118 inst.instruction |= inst.operands[0].reg << 12;
8119 inst.instruction |= inst.operands[1].reg << 16;
8120 inst.instruction |= inst.operands[2].reg;
8121 inst.instruction |= inst.operands[3].imm << 10;
8122 }
8123
8124 /* ARM V6 SXTH.
8125
8126 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8127 Condition defaults to COND_ALWAYS.
8128 Error if any register uses R15. */
8129
8130 static void
8131 do_sxth (void)
8132 {
8133 inst.instruction |= inst.operands[0].reg << 12;
8134 inst.instruction |= inst.operands[1].reg;
8135 inst.instruction |= inst.operands[2].imm << 10;
8136 }
8137 \f
8138 /* VFP instructions. In a logical order: SP variant first, monad
8139 before dyad, arithmetic then move then load/store. */
8140
/* Single-precision monadic operation: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8147
/* Single-precision dyadic operation: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8155
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
8161
/* Conversion with double-precision destination: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8168
/* Conversion with single-precision destination: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8175
/* Transfer from a VFP single register to a core register: Rd, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
8182
/* Transfer an SP register pair to two core registers: Rd, Rn, {Sm, Sm1}.
   The register list must contain exactly two registers.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8192
/* Transfer from a core register to a VFP single register: Sn, Rd.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
8199
/* Transfer two core registers to an SP register pair: {Sm, Sm1}, Rd, Rn.
   The register list must contain exactly two registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8209
/* Single-precision load/store: Sd, [address in operand 1].  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8216
/* Double-precision load/store: Dd, [address in operand 1].  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8223
8224
/* Common encoder for single-precision load/store multiple.
   LDSTM_TYPE selects the addressing variant; only the IA form may be
   used without base-register writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  /* Low bits hold the transfer count.  */
  inst.instruction |= inst.operands[1].imm;
}
8237
/* Common encoder for double-precision load/store multiple.
   LDSTM_TYPE selects the addressing variant; only the IA and IAX forms
   may be used without base-register writeback.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The count is in words: two per D register, plus one for the
     X variants.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
8258
/* Single-precision load/store multiple, increment-after variant.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
8264
/* Single-precision load/store multiple, decrement-before variant.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
8270
/* Double-precision load/store multiple, increment-after variant.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
8276
/* Double-precision load/store multiple, decrement-before variant.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
8282
/* Extended-precision (X) load/store multiple, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
8288
/* Extended-precision (X) load/store multiple, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
8294
/* Double-precision two-operand form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8301
/* Double-precision two-operand form with operand 0 in the Dn field.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
8308
/* Double-precision two-operand form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
8315
/* Double-precision three-operand form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
8323
/* Double-precision single-operand form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
8329
/* Double-precision three-operand form with operand 0 in the Dm field.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
8337
8338 /* VFPv3 instructions. */
8339 static void
8340 do_vfp_sp_const (void)
8341 {
8342 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8343 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8344 inst.instruction |= (inst.operands[1].imm & 0x0f);
8345 }
8346
8347 static void
8348 do_vfp_dp_const (void)
8349 {
8350 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8351 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8352 inst.instruction |= (inst.operands[1].imm & 0x0f);
8353 }
8354
8355 static void
8356 vfp_conv (int srcsize)
8357 {
8358 unsigned immbits = srcsize - inst.operands[1].imm;
8359 inst.instruction |= (immbits & 1) << 5;
8360 inst.instruction |= (immbits >> 1);
8361 }
8362
/* Single-precision conversion with a 16-bit fixed-point operand.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
8369
/* Double-precision conversion with a 16-bit fixed-point operand.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
8376
/* Single-precision conversion with a 32-bit fixed-point operand.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
8383
/* Double-precision conversion with a 32-bit fixed-point operand.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
8390 \f
8391 /* FPA instructions. Also in a logical order. */
8392
8393 static void
8394 do_fpa_cmp (void)
8395 {
8396 inst.instruction |= inst.operands[0].reg << 16;
8397 inst.instruction |= inst.operands[1].reg;
8398 }
8399
/* FPA load/store multiple (LFM/SFM).  Operand 1 is the register count
   (1-4); operand 2 is the address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count is encoded in the two CP_T bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per transferred register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending with writeback is emulated as a post-indexed
	 access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8438 \f
8439 /* iWMMXt instructions: strictly in alphabetical order. */
8440
/* TANDC-family: the destination must be r15; nothing else needs
   encoding.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
8446
8447 static void
8448 do_iwmmxt_textrc (void)
8449 {
8450 inst.instruction |= inst.operands[0].reg << 12;
8451 inst.instruction |= inst.operands[1].imm;
8452 }
8453
8454 static void
8455 do_iwmmxt_textrm (void)
8456 {
8457 inst.instruction |= inst.operands[0].reg << 12;
8458 inst.instruction |= inst.operands[1].reg << 16;
8459 inst.instruction |= inst.operands[2].imm;
8460 }
8461
8462 static void
8463 do_iwmmxt_tinsr (void)
8464 {
8465 inst.instruction |= inst.operands[0].reg << 16;
8466 inst.instruction |= inst.operands[1].reg << 12;
8467 inst.instruction |= inst.operands[2].imm;
8468 }
8469
8470 static void
8471 do_iwmmxt_tmia (void)
8472 {
8473 inst.instruction |= inst.operands[0].reg << 5;
8474 inst.instruction |= inst.operands[1].reg;
8475 inst.instruction |= inst.operands[2].reg << 12;
8476 }
8477
8478 static void
8479 do_iwmmxt_waligni (void)
8480 {
8481 inst.instruction |= inst.operands[0].reg << 12;
8482 inst.instruction |= inst.operands[1].reg << 16;
8483 inst.instruction |= inst.operands[2].reg;
8484 inst.instruction |= inst.operands[3].imm << 20;
8485 }
8486
8487 static void
8488 do_iwmmxt_wmerge (void)
8489 {
8490 inst.instruction |= inst.operands[0].reg << 12;
8491 inst.instruction |= inst.operands[1].reg << 16;
8492 inst.instruction |= inst.operands[2].reg;
8493 inst.instruction |= inst.operands[3].imm << 21;
8494 }
8495
8496 static void
8497 do_iwmmxt_wmov (void)
8498 {
8499 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8500 inst.instruction |= inst.operands[0].reg << 12;
8501 inst.instruction |= inst.operands[1].reg << 16;
8502 inst.instruction |= inst.operands[1].reg;
8503 }
8504
/* iWMMXt byte/halfword load/store: the offset uses the scaled (S2)
   coprocessor-offset reloc, ARM or Thumb flavour as appropriate.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
8516
/* iWMMXt word load/store; also covers control-register transfers.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register forms are unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8530
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset form
   exists and uses a rewritten, unconditional encoding.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields reused below, then rebuild the encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);	/* Unconditional.  */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
8553
8554 static void
8555 do_iwmmxt_wshufh (void)
8556 {
8557 inst.instruction |= inst.operands[0].reg << 12;
8558 inst.instruction |= inst.operands[1].reg << 16;
8559 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8560 inst.instruction |= (inst.operands[2].imm & 0x0f);
8561 }
8562
8563 static void
8564 do_iwmmxt_wzero (void)
8565 {
8566 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8567 inst.instruction |= inst.operands[0].reg;
8568 inst.instruction |= inst.operands[0].reg << 12;
8569 inst.instruction |= inst.operands[0].reg << 16;
8570 }
8571
/* iWMMXt shift instructions taking either a register or (on iWMMXt2)
   a 5-bit immediate shift count.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    /* The immediate forms are an iWMMXt2 extension.  */
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A zero count is rewritten per the size field in bits 20-23,
	   as commented on each case below.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
8621 \f
8622 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8623 operations first, then control, shift, and load/store. */
8624
8625 /* Insns like "foo X,Y,Z". */
8626
8627 static void
8628 do_mav_triple (void)
8629 {
8630 inst.instruction |= inst.operands[0].reg << 16;
8631 inst.instruction |= inst.operands[1].reg;
8632 inst.instruction |= inst.operands[2].reg << 12;
8633 }
8634
8635 /* Insns like "foo W,X,Y,Z".
8636 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8637
8638 static void
8639 do_mav_quad (void)
8640 {
8641 inst.instruction |= inst.operands[0].reg << 5;
8642 inst.instruction |= inst.operands[1].reg << 12;
8643 inst.instruction |= inst.operands[2].reg << 16;
8644 inst.instruction |= inst.operands[3].reg;
8645 }
8646
8647 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit; only the MVDX source register is encoded.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
8653
8654 /* Maverick shift immediate instructions.
8655 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8656 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8657
static void
do_mav_shift (void)
{
  /* Signed 7-bit shift amount from the operand parser.  */
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
8673 \f
8674 /* XScale instructions. Also sorted arithmetic before move. */
8675
8676 /* Xscale multiply-accumulate (argument parse)
8677 MIAcc acc0,Rm,Rs
8678 MIAPHcc acc0,Rm,Rs
8679 MIAxycc acc0,Rm,Rs. */
8680
8681 static void
8682 do_xsc_mia (void)
8683 {
8684 inst.instruction |= inst.operands[1].reg;
8685 inst.instruction |= inst.operands[2].reg << 12;
8686 }
8687
8688 /* Xscale move-accumulator-register (argument parse)
8689
8690 MARcc acc0,RdLo,RdHi. */
8691
8692 static void
8693 do_xsc_mar (void)
8694 {
8695 inst.instruction |= inst.operands[1].reg << 12;
8696 inst.instruction |= inst.operands[2].reg << 16;
8697 }
8698
8699 /* Xscale move-register-accumulator (argument parse)
8700
8701 MRAcc RdLo,RdHi,acc0. */
8702
8703 static void
8704 do_xsc_mra (void)
8705 {
8706 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8707 inst.instruction |= inst.operands[0].reg << 12;
8708 inst.instruction |= inst.operands[1].reg << 16;
8709 }
8710 \f
8711 /* Encoding functions relevant only to Thumb. */
8712
8713 /* inst.operands[i] is a shifted-register operand; encode
8714 it into inst.instruction in the format used by Thumb32. */
8715
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero amount field.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 degenerates to LSL #0; LSR/ASR #32 use a zero
	 amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Split the amount: imm3 into bits 12-14, imm2 into bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8747
8748
8749 /* inst.operands[i] was set up by parse_address. Encode it into a
8750 Thumb32 format load or store instruction. Reject forms that cannot
8751 be used with such instructions. If is_t is true, reject forms that
8752 cannot be used with a T instruction; if is_d is true, reject forms
8753 that cannot be used with a D instruction. If it is a store insn,
8754 reject PC in Rn. */
8755
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0-#3 can be encoded.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #off]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
		  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #off — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8827
8828 /* Table of Thumb instructions which exist in both 16- and 32-bit
8829 encodings (the latter only in post-V6T2 cores). The index is the
8830 value used in the insns table below. When there is more than one
8831 possible 16-bit encoding for the instruction, this table always
8832 holds variant (1).
8833 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by T_MNEM value via THUMB_OP16.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Matching 32-bit opcodes; bit 20 of a 32-bit opcode is the S
   (flag-setting) bit, as used by THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
8931
8932 /* Thumb instruction encoders, in alphabetical order. */
8933
8934 /* ADDW or SUBW. */
8935
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  /* The 12-bit immediate is applied by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
8954
8955 /* Parse an add or subtract instruction. We get here with inst.instruction
8956 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8957
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  /* PC as destination gets special IT-block handling.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* The 16-bit encodings set flags outside an IT block and do not
	 set them inside one; pick the narrow form accordingly.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Allow relaxation to the 32-bit form unless the
		     user forced a 16-bit encoding.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     may write the PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalise so that Rd == Rs.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the non-flag-setting
	 mnemonics are accepted here.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
9158
/* Thumb ADR (argument parse).  Computes Rd = PC-relative address.
   Picks between a relaxable 16-bit form, a fixed 32-bit ADD/SUB
   Rd, PC, #imm12, and a fixed 16-bit form, depending on syntax mode,
   any explicit size suffix, and whether Rd is a low register.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* No size requested and low Rd: defer the 16/32-bit decision to
	 section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
9193
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The 16-bit
	     encoding sets flags outside an IT block and does not set them
	     inside one, so the narrow form is usable exactly when the
	     requested flag behaviour matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so it also needs Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
9282
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The 16-bit
	     encoding sets flags outside an IT block and does not set them
	     inside one, so the narrow form is usable exactly when the
	     requested flag behaviour matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The operation is commutative, so the two-operand 16-bit form
	     works if Rd matches either source.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
9383
9384 static void
9385 do_t_barrier (void)
9386 {
9387 if (inst.operands[0].present)
9388 {
9389 constraint ((inst.instruction & 0xf0) != 0x40
9390 && inst.operands[0].imm != 0xf,
9391 _("bad barrier type"));
9392 inst.instruction |= inst.operands[0].imm;
9393 }
9394 else
9395 inst.instruction |= 0xf;
9396 }
9397
9398 static void
9399 do_t_bfc (void)
9400 {
9401 unsigned Rd;
9402 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9403 constraint (msb > 32, _("bit-field extends past end of register"));
9404 /* The instruction encoding stores the LSB and MSB,
9405 not the LSB and width. */
9406 Rd = inst.operands[0].reg;
9407 reject_bad_reg (Rd);
9408 inst.instruction |= Rd << 8;
9409 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9410 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9411 inst.instruction |= msb - 1;
9412 }
9413
9414 static void
9415 do_t_bfi (void)
9416 {
9417 int Rd, Rn;
9418 unsigned int msb;
9419
9420 Rd = inst.operands[0].reg;
9421 reject_bad_reg (Rd);
9422
9423 /* #0 in second position is alternative syntax for bfc, which is
9424 the same instruction but with REG_PC in the Rm field. */
9425 if (!inst.operands[1].isreg)
9426 Rn = REG_PC;
9427 else
9428 {
9429 Rn = inst.operands[1].reg;
9430 reject_bad_reg (Rn);
9431 }
9432
9433 msb = inst.operands[2].imm + inst.operands[3].imm;
9434 constraint (msb > 32, _("bit-field extends past end of register"));
9435 /* The instruction encoding stores the LSB and MSB,
9436 not the LSB and width. */
9437 inst.instruction |= Rd << 8;
9438 inst.instruction |= Rn << 16;
9439 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9440 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9441 inst.instruction |= msb - 1;
9442 }
9443
9444 static void
9445 do_t_bfx (void)
9446 {
9447 unsigned Rd, Rn;
9448
9449 Rd = inst.operands[0].reg;
9450 Rn = inst.operands[1].reg;
9451
9452 reject_bad_reg (Rd);
9453 reject_bad_reg (Rn);
9454
9455 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9456 _("bit-field extends past end of register"));
9457 inst.instruction |= Rd << 8;
9458 inst.instruction |= Rn << 16;
9459 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9460 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9461 inst.instruction |= inst.operands[3].imm - 1;
9462 }
9463
9464 /* ARM V5 Thumb BLX (argument parse)
9465 BLX <target_addr> which is BLX(1)
9466 BLX <Rm> which is BLX(2)
9467 Unfortunately, there are two different opcodes for this mnemonic.
9468 So, the insns[].value is not used, and the code here zaps values
9469 into inst.instruction.
9470
9471 ??? How to take advantage of the additional two bits of displacement
9472 available in Thumb32 mode? Need new relocation? */
9473
9474 static void
9475 do_t_blx (void)
9476 {
9477 set_it_insn_type_last ();
9478
9479 if (inst.operands[0].isreg)
9480 {
9481 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9482 /* We have a register, so this is BLX(2). */
9483 inst.instruction |= inst.operands[0].reg << 3;
9484 }
9485 else
9486 {
9487 /* No register. This must be BLX(1). */
9488 inst.instruction = 0xf000e800;
9489 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
9490 inst.reloc.pc_rel = 1;
9491 }
9492 }
9493
/* Thumb conditional/unconditional branch (B/Bcc; argument parse).
   Selects between 16-bit and 32-bit encodings and the matching
   PC-relative relocation, allowing relaxation when no size suffix
   is given.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  /* NOTE(review): this initial assignment is redundant — both arms of
     the if/else below set cond again.  Harmless, but could be removed.  */
  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* Condition 0xF (NV) has no T32 Bcc encoding.  */
	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
9546
9547 static void
9548 do_t_bkpt (void)
9549 {
9550 constraint (inst.cond != COND_ALWAYS,
9551 _("instruction is always unconditional"));
9552 if (inst.operands[0].present)
9553 {
9554 constraint (inst.operands[0].imm > 255,
9555 _("immediate value out of range"));
9556 inst.instruction |= inst.operands[0].imm;
9557 set_it_insn_type (NEUTRAL_IT_INSN);
9558 }
9559 }
9560
/* Thumb BL/BLX with immediate target (argument parse); uses the
   23-bit PC-relative branch relocation.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
9581
9582 static void
9583 do_t_bx (void)
9584 {
9585 set_it_insn_type_last ();
9586 inst.instruction |= inst.operands[0].reg << 3;
9587 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9588 should cause the alignment to be checked once it is known. This is
9589 because BX PC only works if the instruction is word aligned. */
9590 }
9591
9592 static void
9593 do_t_bxj (void)
9594 {
9595 int Rm;
9596
9597 set_it_insn_type_last ();
9598 Rm = inst.operands[0].reg;
9599 reject_bad_reg (Rm);
9600 inst.instruction |= Rm << 16;
9601 }
9602
9603 static void
9604 do_t_clz (void)
9605 {
9606 unsigned Rd;
9607 unsigned Rm;
9608
9609 Rd = inst.operands[0].reg;
9610 Rm = inst.operands[1].reg;
9611
9612 reject_bad_reg (Rd);
9613 reject_bad_reg (Rm);
9614
9615 inst.instruction |= Rd << 8;
9616 inst.instruction |= Rm << 16;
9617 inst.instruction |= Rm;
9618 }
9619
9620 static void
9621 do_t_cps (void)
9622 {
9623 set_it_insn_type (OUTSIDE_IT_INSN);
9624 inst.instruction |= inst.operands[0].imm;
9625 }
9626
/* Thumb CPSIE/CPSID (argument parse).  Uses the 32-bit Thumb-2 CPS
   encoding when a mode operand or .w suffix requires it and the core
   supports it; otherwise falls back to the 16-bit form.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID disposition over from the 16-bit opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
9654
9655 /* THUMB CPY instruction (argument parse). */
9656
9657 static void
9658 do_t_cpy (void)
9659 {
9660 if (inst.size_req == 4)
9661 {
9662 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9663 inst.instruction |= inst.operands[0].reg << 8;
9664 inst.instruction |= inst.operands[1].reg;
9665 }
9666 else
9667 {
9668 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9669 inst.instruction |= (inst.operands[0].reg & 0x7);
9670 inst.instruction |= inst.operands[1].reg << 3;
9671 }
9672 }
9673
9674 static void
9675 do_t_cbz (void)
9676 {
9677 set_it_insn_type (OUTSIDE_IT_INSN);
9678 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9679 inst.instruction |= inst.operands[0].reg;
9680 inst.reloc.pc_rel = 1;
9681 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9682 }
9683
9684 static void
9685 do_t_dbg (void)
9686 {
9687 inst.instruction |= inst.operands[0].imm;
9688 }
9689
9690 static void
9691 do_t_div (void)
9692 {
9693 unsigned Rd, Rn, Rm;
9694
9695 Rd = inst.operands[0].reg;
9696 Rn = (inst.operands[1].present
9697 ? inst.operands[1].reg : Rd);
9698 Rm = inst.operands[2].reg;
9699
9700 reject_bad_reg (Rd);
9701 reject_bad_reg (Rn);
9702 reject_bad_reg (Rm);
9703
9704 inst.instruction |= Rd << 8;
9705 inst.instruction |= Rn << 16;
9706 inst.instruction |= Rm;
9707 }
9708
9709 static void
9710 do_t_hint (void)
9711 {
9712 if (unified_syntax && inst.size_req == 4)
9713 inst.instruction = THUMB_OP32 (inst.instruction);
9714 else
9715 inst.instruction = THUMB_OP16 (inst.instruction);
9716 }
9717
/* Thumb-2 IT instruction (argument parse).  Records the IT state for
   subsequent instructions and fixes up the mask for negated base
   conditions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;

  /* If the condition is a negative condition, invert the mask.
     The parser built the mask assuming an even (positive) condition;
     for odd base conditions each then/else bit above the terminating
     1-bit must be flipped.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
9747
/* Helper function used for both push/pop and ldm/stm.
   Encode a Thumb-2 multiple load/store with base register BASE,
   register-list bitmask MASK, and WRITEBACK flag, diagnosing
   UNPREDICTABLE register-list combinations.  A single-register
   transfer is rewritten as plain LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");
  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch: must be last in any IT block.  */
	    set_it_insn_type_last ();
	}

      if ((mask & (1 << base)) != 0
	  && writeback)
	as_warn (_("base register should not be in register list "
		   "when written back"));
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");

      if (mask & (1 << base))
	as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
    }

  /* Exactly one bit set: use a single str/ldr instead of ldm/stm.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Replace the mask by the transferred register number in Rt.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
9813
/* Thumb LDM/STM (argument parse).  In unified syntax, tries the
   16-bit LDMIA/STMIA (or PUSH/POP for an SP base) before falling back
   to the 32-bit Thumb-2 encoding; in divided syntax only the classic
   16-bit forms are accepted.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  /* The 16-bit form requires writeback, except that LDMIA
	     without writeback is allowed when the base register is
	     also loaded.  */
	  if (inst.operands[0].reg <= 7
	      && (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask))))
	    {
	      if (inst.instruction == T_MNEM_stmia
		  && (inst.operands[1].imm & mask)
		  && (inst.operands[1].imm & (mask - 1)))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);

	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	  else if (inst.operands[0] .reg == REG_SP
		   && inst.operands[0].writeback)
	    {
	      /* SP base with writeback: use 16-bit PUSH/POP.  */
	      inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
					     ? T_MNEM_push : T_MNEM_pop);
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
9903
/* Thumb-2 LDREX (argument parse).  Only the plain immediate-offset
   addressing form [Rn, #imm] is legal.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
9919
9920 static void
9921 do_t_ldrexd (void)
9922 {
9923 if (!inst.operands[1].present)
9924 {
9925 constraint (inst.operands[0].reg == REG_LR,
9926 _("r14 not allowed as first register "
9927 "when second register is omitted"));
9928 inst.operands[1].reg = inst.operands[0].reg + 1;
9929 }
9930 constraint (inst.operands[0].reg == inst.operands[1].reg,
9931 BAD_OVERLAP);
9932
9933 inst.instruction |= inst.operands[0].reg << 12;
9934 inst.instruction |= inst.operands[1].reg << 8;
9935 inst.instruction |= inst.operands[2].reg << 16;
9936 }
9937
/* Thumb single-register load/store (LDR/STR and the byte, halfword and
   signed variants; argument parse).  In unified syntax, prefers a
   16-bit encoding (possibly via relaxation) and falls back to the
   32-bit Thumb-2 form; in divided syntax only the classic 16-bit
   forms are accepted.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    /* Loading PC is a branch: must be last in any IT block.  */
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit .n: let relaxation widen if needed.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg && opcode != T_MNEM_ldr
	  && opcode != T_MNEM_str)
	reject_bad_reg (inst.operands[1].imm);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

  /* Convert the immediate-offset opcode to its register-offset
     counterpart and encode [Rn, Rm].  */
 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */: inst.instruction = 0x5600; break;
    case 0x5e00 /* ldrsh */: inst.instruction = 0x5e00; break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
10105
10106 static void
10107 do_t_ldstd (void)
10108 {
10109 if (!inst.operands[1].present)
10110 {
10111 inst.operands[1].reg = inst.operands[0].reg + 1;
10112 constraint (inst.operands[0].reg == REG_LR,
10113 _("r14 not allowed here"));
10114 }
10115 inst.instruction |= inst.operands[0].reg << 12;
10116 inst.instruction |= inst.operands[1].reg << 8;
10117 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10118 }
10119
10120 static void
10121 do_t_ldstt (void)
10122 {
10123 inst.instruction |= inst.operands[0].reg << 12;
10124 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10125 }
10126
10127 static void
10128 do_t_mla (void)
10129 {
10130 unsigned Rd, Rn, Rm, Ra;
10131
10132 Rd = inst.operands[0].reg;
10133 Rn = inst.operands[1].reg;
10134 Rm = inst.operands[2].reg;
10135 Ra = inst.operands[3].reg;
10136
10137 reject_bad_reg (Rd);
10138 reject_bad_reg (Rn);
10139 reject_bad_reg (Rm);
10140 reject_bad_reg (Ra);
10141
10142 inst.instruction |= Rd << 8;
10143 inst.instruction |= Rn << 16;
10144 inst.instruction |= Rm;
10145 inst.instruction |= Ra << 12;
10146 }
10147
10148 static void
10149 do_t_mlal (void)
10150 {
10151 unsigned RdLo, RdHi, Rn, Rm;
10152
10153 RdLo = inst.operands[0].reg;
10154 RdHi = inst.operands[1].reg;
10155 Rn = inst.operands[2].reg;
10156 Rm = inst.operands[3].reg;
10157
10158 reject_bad_reg (RdLo);
10159 reject_bad_reg (RdHi);
10160 reject_bad_reg (Rn);
10161 reject_bad_reg (Rm);
10162
10163 inst.instruction |= RdLo << 12;
10164 inst.instruction |= RdHi << 8;
10165 inst.instruction |= Rn << 16;
10166 inst.instruction |= Rm;
10167 }
10168
/* Encode Thumb MOV, MOVS or CMP.  Operand 0 is Rd (MOV/MOVS) or Rn
   (CMP); operand 1 is a register (possibly shifted) or an immediate.
   Selects between 16-bit and 32-bit encodings, honouring an explicit
   size request (inst.size_req) and the IT-block context.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* An insn targeting/testing PC must be last in its IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position in the T32 encoding: bit 8 for
	 MOV/MOVS, bit 16 for CMP.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the 16-bit form cannot set flags, so MOVS
	 is forced wide; outside, narrow MOVS needs low registers.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      /* Plain MOV: only reject when BOTH registers are
		 SP/PC; single uses are allowed here.  */
	      else if ((Rn == REG_SP || Rn == REG_PC)
		       && (Rm == REG_SP || Rm == REG_PC))
		reject_bad_reg (Rm);
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  /* The 16-bit immediate MOV always sets flags, so plain MOV
	     outside an IT block cannot use it.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* No explicit size: let relaxation widen if needed.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      /* Select the plain-immediate form of the T32 encoding.  */
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift form is destructive: Rd must equal Rm.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		/* ROR has no narrow immediate form; fall back to wide.  */
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* High-register MOV: Rd split across bits 7 and 2:0.  */
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (non-unified) syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
10431
/* Encode Thumb-2 MOVW/MOVT (16-bit immediate move).  Validates that a
   :lower16:/:upper16: modifier matches the mnemonic, converts the ARM
   reloc to its Thumb counterpart, and, for plain constants, splits the
   16-bit immediate into the T32 imm4:i:imm3:imm8 fields.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT from MOVW in the pre-encoded opcode.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Constant immediate: encode it directly, no fixup needed.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;	/* imm4 -> bits 19:16.  */
      inst.instruction |= (imm & 0x0800) << 15;	/* i    -> bit 26.  */
      inst.instruction |= (imm & 0x0700) << 4;	/* imm3 -> bits 14:12.  */
      inst.instruction |= (imm & 0x00ff);	/* imm8 -> bits 7:0.  */
    }
}
10464
/* Encode Thumb MVN/MVNS/TST/TEQ/CMP/CMN (one register/immediate source,
   operand 0 in either the Rd or Rn field depending on the mnemonic).  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN tolerate SP in Rn but never PC; other mnemonics get the
     full SP/PC validation.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Operand 0 lives at bit 8 (Rd) for MVN/MVNS, bit 16 (Rn)
	 for the compare/test mnemonics.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Select the plain-immediate form of the T32 encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: only the 16-bit low-register form exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
10543
/* Encode Thumb MRS (read special register into Rd).  Delegates to the
   VFP handler first in case this is really a VFP system register move.
   flags == 0 means an M-profile special register was named (no PSR
   field bits set); otherwise only CPSR/SPSR spellings are accepted.  */
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      /* No PSR field bits: must be an M-profile register name.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  /* SPSR selector bit, then the raw register specifier byte.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
10577
/* Encode Thumb MSR (write Rn to a special register).  Delegates to the
   VFP handler first.  Immediate source forms are ARM-only, so a
   register operand is required here.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* PSR field bits present: an A/R-profile CPSR/SPSR form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* Bare register specifier: M-profile special register.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* SPSR selector, field mask, register specifier byte, then Rn.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
10612
/* Encode Thumb MUL/MULS.  A two-operand form duplicates Rd as the
   second source.  The 16-bit encoding is destructive, so it is only
   usable when Rd overlaps one of the sources and all regs are low.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	/* 16-bit MUL sets flags, so MULS must be outside an IT block.  */
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
10675
10676 static void
10677 do_t_mull (void)
10678 {
10679 unsigned RdLo, RdHi, Rn, Rm;
10680
10681 RdLo = inst.operands[0].reg;
10682 RdHi = inst.operands[1].reg;
10683 Rn = inst.operands[2].reg;
10684 Rm = inst.operands[3].reg;
10685
10686 reject_bad_reg (RdLo);
10687 reject_bad_reg (RdHi);
10688 reject_bad_reg (Rn);
10689 reject_bad_reg (Rm);
10690
10691 inst.instruction |= RdLo << 12;
10692 inst.instruction |= RdHi << 8;
10693 inst.instruction |= Rn << 16;
10694 inst.instruction |= Rm;
10695
10696 if (RdLo == RdHi)
10697 as_tsktsk (_("rdhi and rdlo must be different"));
10698 }
10699
/* Encode Thumb NOP and NOP-compatible hints.  0x46c0 is the classic
   Thumb-1 "mov r8, r8" NOP used when no real hint encoding exists.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide form; hint number goes in the low bits.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
10732
10733 static void
10734 do_t_neg (void)
10735 {
10736 if (unified_syntax)
10737 {
10738 bfd_boolean narrow;
10739
10740 if (THUMB_SETS_FLAGS (inst.instruction))
10741 narrow = !in_it_block ();
10742 else
10743 narrow = in_it_block ();
10744 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10745 narrow = FALSE;
10746 if (inst.size_req == 4)
10747 narrow = FALSE;
10748
10749 if (!narrow)
10750 {
10751 inst.instruction = THUMB_OP32 (inst.instruction);
10752 inst.instruction |= inst.operands[0].reg << 8;
10753 inst.instruction |= inst.operands[1].reg << 16;
10754 }
10755 else
10756 {
10757 inst.instruction = THUMB_OP16 (inst.instruction);
10758 inst.instruction |= inst.operands[0].reg;
10759 inst.instruction |= inst.operands[1].reg << 3;
10760 }
10761 }
10762 else
10763 {
10764 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
10765 BAD_HIREG);
10766 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10767
10768 inst.instruction = THUMB_OP16 (inst.instruction);
10769 inst.instruction |= inst.operands[0].reg;
10770 inst.instruction |= inst.operands[1].reg << 3;
10771 }
10772 }
10773
/* Encode Thumb-2 ORN/ORNS (Rd, Rn, Rm-or-immediate).  A missing Rn
   defaults to Rd (two-operand form).  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate source: select the plain-immediate T32 form and
	 leave the value to the fixup.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Register-shifted registers are not valid here.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
10807
/* Encode Thumb-2 PKHBT (Rd, Rn, Rm {, shift}).  The optional constant
   shift amount is split into imm3 (bits 14:12) and imm2 (bits 7:6).  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;	/* imm3 -> bits 14:12.  */
      inst.instruction |= (val & 0x03) << 6;	/* imm2 -> bits 7:6.  */
    }
}
10833
10834 static void
10835 do_t_pkhtb (void)
10836 {
10837 if (!inst.operands[3].present)
10838 {
10839 unsigned Rtmp;
10840
10841 inst.instruction &= ~0x00000020;
10842
10843 /* PR 10168. Swap the Rm and Rn registers. */
10844 Rtmp = inst.operands[1].reg;
10845 inst.operands[1].reg = inst.operands[2].reg;
10846 inst.operands[2].reg = Rtmp;
10847 }
10848 do_t_pkhbt ();
10849 }
10850
10851 static void
10852 do_t_pld (void)
10853 {
10854 if (inst.operands[0].immisreg)
10855 reject_bad_reg (inst.operands[0].imm);
10856
10857 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
10858 }
10859
/* Encode Thumb PUSH/POP.  Dispatch on the shape of the register mask:
   low registers only -> plain 16-bit form; low registers plus LR
   (push) or PC (pop) -> 16-bit form with the extra PC/LR bit;
   otherwise fall back to a 32-bit LDM/STM with SP writeback.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) / PC (pop).  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* Base register 13 (SP), with writeback.  */
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
10893
10894 static void
10895 do_t_rbit (void)
10896 {
10897 unsigned Rd, Rm;
10898
10899 Rd = inst.operands[0].reg;
10900 Rm = inst.operands[1].reg;
10901
10902 reject_bad_reg (Rd);
10903 reject_bad_reg (Rm);
10904
10905 inst.instruction |= Rd << 8;
10906 inst.instruction |= Rm << 16;
10907 inst.instruction |= Rm;
10908 }
10909
/* Encode Thumb REV/REV16/REVSH.  Low registers without an explicit
   4-byte size request use the 16-bit form; otherwise the unified
   32-bit form (where Rm is encoded twice) or an error.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* Rm occupies two fields in the 32-bit encoding.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
10938
10939 static void
10940 do_t_rrx (void)
10941 {
10942 unsigned Rd, Rm;
10943
10944 Rd = inst.operands[0].reg;
10945 Rm = inst.operands[1].reg;
10946
10947 reject_bad_reg (Rd);
10948 reject_bad_reg (Rm);
10949
10950 inst.instruction |= Rd << 8;
10951 inst.instruction |= Rm;
10952 }
10953
/* Encode Thumb RSB/RSBS.  A missing Rs defaults to Rd.  The only
   narrow opportunity is "rsb Rd, Rs, #0", which is emitted as the
   16-bit NEGS instruction when the IT context permits.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S bit of the pre-encoded opcode.  The 16-bit
	 NEGS always sets flags, so it must match the IT context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Select the plain-immediate form of the T32 encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
11008
11009 static void
11010 do_t_setend (void)
11011 {
11012 set_it_insn_type (OUTSIDE_IT_INSN);
11013 if (inst.operands[0].imm)
11014 inst.instruction |= 0x8;
11015 }
11016
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, flag-setting or
   not; register or immediate shift amount).  A missing middle operand
   means the destructive two-operand form.  Wide immediate shifts are
   re-encoded as MOV/MOVS with a shifted operand.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit forms set flags, so they are only usable when that
	 matches the IT context.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* ROR has no 16-bit immediate form.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The narrow register-shift form is destructive (Rd == Rn) and
	 low-register only.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Wide immediate shift: emit as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit low-register forms only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
11152
11153 static void
11154 do_t_simd (void)
11155 {
11156 unsigned Rd, Rn, Rm;
11157
11158 Rd = inst.operands[0].reg;
11159 Rn = inst.operands[1].reg;
11160 Rm = inst.operands[2].reg;
11161
11162 reject_bad_reg (Rd);
11163 reject_bad_reg (Rn);
11164 reject_bad_reg (Rm);
11165
11166 inst.instruction |= Rd << 8;
11167 inst.instruction |= Rn << 16;
11168 inst.instruction |= Rm;
11169 }
11170
/* As do_t_simd, but with the SOURCE operand order reversed: assembly
   operand 1 supplies Rm (bits 3:0) and operand 2 supplies Rn (bits
   19:16).  The swap relative to do_t_simd is deliberate.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;	/* NOTE: operand 1 -> Rm, not Rn.  */
  Rn = inst.operands[2].reg;	/* NOTE: operand 2 -> Rn.  */

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
11188
/* Encode Thumb SMC.  The constant operand is split across three
   fields of the 32-bit opcode; no fixup is left behind.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The value is fully encoded here, so drop the pending reloc.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
11200
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation-width immediate before encoding (1 for SSAT, 0 for
   USAT).  An optional LSL/ASR shift of the source is encoded in the
   sh bit plus the split imm3:imm2 shift-amount fields.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      /* Shift amount is encoded inline; no fixup needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
11238
/* Encode Thumb-2 SSAT: the saturation width is encoded with a bias of
   1 (see do_t_ssat_usat).  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
11244
11245 static void
11246 do_t_ssat16 (void)
11247 {
11248 unsigned Rd, Rn;
11249
11250 Rd = inst.operands[0].reg;
11251 Rn = inst.operands[2].reg;
11252
11253 reject_bad_reg (Rd);
11254 reject_bad_reg (Rn);
11255
11256 inst.instruction |= Rd << 8;
11257 inst.instruction |= inst.operands[1].imm - 1;
11258 inst.instruction |= Rn << 16;
11259 }
11260
/* Encode Thumb-2 STREX (Rd, Rt, [Rn {, #imm}]).  The address operand
   must be a plain pre-indexed base with an optional constant offset;
   the offset is handled by the T32 U8 offset fixup.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11277
/* Encode Thumb-2 STREXD (Rd, Rt, Rt2, [Rn]).  A missing Rt2 defaults
   to Rt+1.  The status register Rd must not overlap any of the other
   three registers.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
11294
11295 static void
11296 do_t_sxtah (void)
11297 {
11298 unsigned Rd, Rn, Rm;
11299
11300 Rd = inst.operands[0].reg;
11301 Rn = inst.operands[1].reg;
11302 Rm = inst.operands[2].reg;
11303
11304 reject_bad_reg (Rd);
11305 reject_bad_reg (Rn);
11306 reject_bad_reg (Rm);
11307
11308 inst.instruction |= Rd << 8;
11309 inst.instruction |= Rn << 16;
11310 inst.instruction |= Rm;
11311 inst.instruction |= inst.operands[3].imm << 4;
11312 }
11313
/* Encode Thumb SXTH-family extends (Rd, Rm {, ror #n}).  The 16-bit
   form is only possible for low registers with no rotation; the
   unified 32-bit form carries the rotation at bit 4.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
11349
/* Encode Thumb SWI/SVC: the immediate is left entirely to the
   BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
11355
/* Encode Thumb-2 TBB/TBH.  Bit 4 of the pre-encoded opcode
   distinguishes the halfword form, which is the only one that may
   carry a (LSL #1) index shift.  Must be the last insn in an IT
   block.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
11377
/* Encode Thumb-2 USAT: unlike SSAT, the saturation width is encoded
   without a bias (see do_t_ssat_usat).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
11383
11384 static void
11385 do_t_usat16 (void)
11386 {
11387 unsigned Rd, Rn;
11388
11389 Rd = inst.operands[0].reg;
11390 Rn = inst.operands[2].reg;
11391
11392 reject_bad_reg (Rd);
11393 reject_bad_reg (Rn);
11394
11395 inst.instruction |= Rd << 8;
11396 inst.instruction |= inst.operands[1].imm;
11397 inst.instruction |= Rn << 16;
11398 }
11399
11400 /* Neon instruction encoder helpers. */
11401
11402 /* Encodings for the different types for various Neon opcodes. */
11403
11404 /* An "invalid" code for the following tables. */
11405 #define N_INV -1u
11406
/* One row of the NEON_ENC_TAB opcode table: the alternative base
   encodings of an overloaded Neon mnemonic.  N_INV marks a form the
   mnemonic does not have.  */
struct neon_tab_entry
{
  unsigned integer;		/* Encoding for the integer form.  */
  unsigned float_or_poly;	/* Encoding for the float (or polynomial) form.  */
  unsigned scalar_or_imm;	/* Encoding for the scalar or immediate form.  */
};
11413
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry gives a mnemonic followed by the base encodings of its integer,
   float-or-polynomial and scalar-or-immediate forms (N_INV where absent;
   a few entries reuse the columns for other distinctions, as noted).
   The list is expanded twice below: once into the N_MNEM_* enumeration
   and once into neon_enc_tab[], indexed by those enumerators.  */
#define NEON_ENC_TAB \
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)

/* N_MNEM_<opc> enumerators, one per NEON_ENC_TAB row; stored in the low
   bits of inst.instruction until NEON_ENCODE replaces them with real
   opcode bits.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* Encoding table, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
11490
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
/* Each macro treats the low 28 bits of X as an N_MNEM_* index into
   neon_enc_tab and selects the column appropriate to the named form.
   The _SINGLE_ and _DOUBLE_ variants also preserve the top nibble
   (condition field) of X.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))

/* Replace the mnemonic index in inst.instruction with the encoding bits
   for the requested form, and mark the instruction as Neon so that
   check_neon_suffixes accepts any type suffix that was parsed.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a Neon type suffix (e.g. ".s32") on an instruction that never
   went through NEON_ENCODE / the other is_neon setters.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
11524
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Paste shape element letters into NS_xx enumerator names.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per NEON_SHAPE_DEF row, plus the NS_NULL sentinel
   that terminates neon_select_shape argument lists.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
11600
/* Broad classification of a shape: which register file / width it
   predominantly uses (see neon_quad).  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
11617
/* The individual element kinds a shape can be built from; these mirror
   the mnemonic letters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L		/* D<n> register list.  */
};

/* Register widths of above, in bits; zero where "width" does not apply
   (immediates and register lists).  */
static unsigned neon_shape_el_size[] =
{
  32,		/* SE_F */
  64,		/* SE_D */
  128,		/* SE_Q */
  0,		/* SE_I */
  32,		/* SE_S */
  32,		/* SE_R */
  0		/* SE_L */
};

/* A decoded shape: its operand count and the element kind of each
   operand position.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Element breakdown of each shape, indexed by enum neon_shape; drives
   the matching loop in neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
11662
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the modifier bits (N_DBL .. N_SIZ) reuse
   the low bit positions and are only meaningful alongside N_EQK.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};

/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of the element-type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
11718
11719 /* Select a "shape" for the current instruction (describing register types or
11720 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11721 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11722 function of operand parsing, so this function doesn't need to be called.
11723 Shapes should be listed in order of decreasing length. */
11724
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated candidate list; the first shape whose
     every element matches the corresponding parsed operand wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check the parsed operand flags against the element kind this
	     shape requires at position J.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists always match here; their contents are
		 validated elsewhere.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Only diagnose when a real list was supplied (first_shape != NS_NULL).  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
11814
11815 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11816 means the Q bit should be set). */
11817
static int
neon_quad (enum neon_shape shape)
{
  /* Consult the classification table generated from NEON_SHAPE_DEF.  */
  return neon_shape_class[shape] == SC_QUAD;
}
11823
11824 static void
11825 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11826 unsigned *g_size)
11827 {
11828 /* Allow modification to be made to types which are constrained to be
11829 based on the key element, based on bits set alongside N_EQK. */
11830 if ((typebits & N_EQK) != 0)
11831 {
11832 if ((typebits & N_HLF) != 0)
11833 *g_size /= 2;
11834 else if ((typebits & N_DBL) != 0)
11835 *g_size *= 2;
11836 if ((typebits & N_SGN) != 0)
11837 *g_type = NT_signed;
11838 else if ((typebits & N_UNS) != 0)
11839 *g_type = NT_unsigned;
11840 else if ((typebits & N_INT) != 0)
11841 *g_type = NT_integer;
11842 else if ((typebits & N_FLT) != 0)
11843 *g_type = NT_float;
11844 else if ((typebits & N_SIZ) != 0)
11845 *g_type = NT_untyped;
11846 }
11847 }
11848
11849 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11850 operand type, i.e. the single type specified in a Neon instruction when it
11851 is the only one given. */
11852
11853 static struct neon_type_el
11854 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11855 {
11856 struct neon_type_el dest = *key;
11857
11858 gas_assert ((thisarg & N_EQK) != 0);
11859
11860 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11861
11862 return dest;
11863 }
11864
11865 /* Convert Neon type and size into compact bitmask representation. */
11866
11867 static enum neon_type_mask
11868 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11869 {
11870 switch (type)
11871 {
11872 case NT_untyped:
11873 switch (size)
11874 {
11875 case 8: return N_8;
11876 case 16: return N_16;
11877 case 32: return N_32;
11878 case 64: return N_64;
11879 default: ;
11880 }
11881 break;
11882
11883 case NT_integer:
11884 switch (size)
11885 {
11886 case 8: return N_I8;
11887 case 16: return N_I16;
11888 case 32: return N_I32;
11889 case 64: return N_I64;
11890 default: ;
11891 }
11892 break;
11893
11894 case NT_float:
11895 switch (size)
11896 {
11897 case 16: return N_F16;
11898 case 32: return N_F32;
11899 case 64: return N_F64;
11900 default: ;
11901 }
11902 break;
11903
11904 case NT_poly:
11905 switch (size)
11906 {
11907 case 8: return N_P8;
11908 case 16: return N_P16;
11909 default: ;
11910 }
11911 break;
11912
11913 case NT_signed:
11914 switch (size)
11915 {
11916 case 8: return N_S8;
11917 case 16: return N_S16;
11918 case 32: return N_S32;
11919 case 64: return N_S64;
11920 default: ;
11921 }
11922 break;
11923
11924 case NT_unsigned:
11925 switch (size)
11926 {
11927 case 8: return N_U8;
11928 case 16: return N_U16;
11929 case 32: return N_U32;
11930 case 64: return N_U64;
11931 default: ;
11932 }
11933 break;
11934
11935 default: ;
11936 }
11937
11938 return N_UTYP;
11939 }
11940
11941 /* Convert compact Neon bitmask type representation to a type and size. Only
11942 handles the case where a single bit is set in the mask. */
11943
11944 static int
11945 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11946 enum neon_type_mask mask)
11947 {
11948 if ((mask & N_EQK) != 0)
11949 return FAIL;
11950
11951 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11952 *size = 8;
11953 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11954 *size = 16;
11955 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11956 *size = 32;
11957 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11958 *size = 64;
11959 else
11960 return FAIL;
11961
11962 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11963 *type = NT_signed;
11964 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11965 *type = NT_unsigned;
11966 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11967 *type = NT_integer;
11968 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11969 *type = NT_untyped;
11970 else if ((mask & (N_P8 | N_P16)) != 0)
11971 *type = NT_poly;
11972 else if ((mask & (N_F32 | N_F64)) != 0)
11973 *type = NT_float;
11974 else
11975 return FAIL;
11976
11977 return SUCCESS;
11978 }
11979
11980 /* Modify a bitmask of allowed types. This is only needed for type
11981 relaxation. */
11982
11983 static unsigned
11984 modify_types_allowed (unsigned allowed, unsigned mods)
11985 {
11986 unsigned size;
11987 enum neon_el_type type;
11988 unsigned destmask;
11989 int i;
11990
11991 destmask = 0;
11992
11993 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11994 {
11995 if (el_type_of_type_chk (&type, &size,
11996 (enum neon_type_mask) (allowed & i)) == SUCCESS)
11997 {
11998 neon_modify_type_size (mods, &type, &size);
11999 destmask |= type_chk_of_el_type (type, size);
12000 }
12001 }
12002
12003 return destmask;
12004 }
12005
12006 /* Check type and return type classification.
12007 The manual states (paraphrase): If one datatype is given, it indicates the
12008 type given in:
12009 - the second operand, if there is one
12010 - the operand, if there is no second operand
12011 - the result, if there are no operands.
12012 This isn't quite good enough though, so we use a concept of a "key" datatype
12013 which is set on a per-instruction basis, which is the one which matters when
12014 only one data type is written.
12015 Note: this function has side-effects (e.g. filling in missing operands). All
12016 Neon instructions should call it before performing bit encoding. */
12017
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type and allowed set; pass 1 checks
     every operand (key-relative ones against the recorded key).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: its (modified) type must agree
		     exactly with the key recorded on pass 0.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
12193
12194 /* Neon-style VFP instruction forwarding. */
12195
12196 /* Thumb VFP instructions have 0xE in the condition field. */
12197
12198 static void
12199 do_vfp_cond_or_thumb (void)
12200 {
12201 inst.is_neon = 1;
12202
12203 if (thumb_mode)
12204 inst.instruction |= 0xe0000000;
12205 else
12206 inst.instruction |= inst.cond << 28;
12207 }
12208
12209 /* Look up and encode a simple mnemonic, for use as a helper function for the
12210 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12211 etc. It is assumed that operand parsing has already been done, and that the
12212 operands are in the form expected by the given opcode (this isn't necessarily
12213 the same as the form in which they were parsed, hence some massaging must
12214 take place before this function is called).
12215 Checks current arch version against that in the looked-up opcode. */
12216
12217 static void
12218 do_vfp_nsyn_opcode (const char *opname)
12219 {
12220 const struct asm_opcode *opcode;
12221
12222 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
12223
12224 if (!opcode)
12225 abort ();
12226
12227 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12228 thumb_mode ? *opcode->tvariant : *opcode->avariant),
12229 _(BAD_FPU));
12230
12231 inst.is_neon = 1;
12232
12233 if (thumb_mode)
12234 {
12235 inst.instruction = opcode->tvalue;
12236 opcode->tencode ();
12237 }
12238 else
12239 {
12240 inst.instruction = (inst.cond << 28) | opcode->avalue;
12241 opcode->aencode ();
12242 }
12243 }
12244
12245 static void
12246 do_vfp_nsyn_add_sub (enum neon_shape rs)
12247 {
12248 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12249
12250 if (rs == NS_FFF)
12251 {
12252 if (is_add)
12253 do_vfp_nsyn_opcode ("fadds");
12254 else
12255 do_vfp_nsyn_opcode ("fsubs");
12256 }
12257 else
12258 {
12259 if (is_add)
12260 do_vfp_nsyn_opcode ("faddd");
12261 else
12262 do_vfp_nsyn_opcode ("fsubd");
12263 }
12264 }
12265
12266 /* Check operand types to see if this is a VFP instruction, and if so call
12267 PFN (). */
12268
12269 static int
12270 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12271 {
12272 enum neon_shape rs;
12273 struct neon_type_el et;
12274
12275 switch (args)
12276 {
12277 case 2:
12278 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12279 et = neon_check_type (2, rs,
12280 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12281 break;
12282
12283 case 3:
12284 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12285 et = neon_check_type (3, rs,
12286 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12287 break;
12288
12289 default:
12290 abort ();
12291 }
12292
12293 if (et.type != NT_invtype)
12294 {
12295 pfn (rs);
12296 return SUCCESS;
12297 }
12298
12299 inst.error = NULL;
12300 return FAIL;
12301 }
12302
12303 static void
12304 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12305 {
12306 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12307
12308 if (rs == NS_FFF)
12309 {
12310 if (is_mla)
12311 do_vfp_nsyn_opcode ("fmacs");
12312 else
12313 do_vfp_nsyn_opcode ("fnmacs");
12314 }
12315 else
12316 {
12317 if (is_mla)
12318 do_vfp_nsyn_opcode ("fmacd");
12319 else
12320 do_vfp_nsyn_opcode ("fnmacd");
12321 }
12322 }
12323
12324 static void
12325 do_vfp_nsyn_fma_fms (enum neon_shape rs)
12326 {
12327 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12328
12329 if (rs == NS_FFF)
12330 {
12331 if (is_fma)
12332 do_vfp_nsyn_opcode ("ffmas");
12333 else
12334 do_vfp_nsyn_opcode ("ffnmas");
12335 }
12336 else
12337 {
12338 if (is_fma)
12339 do_vfp_nsyn_opcode ("ffmad");
12340 else
12341 do_vfp_nsyn_opcode ("ffnmad");
12342 }
12343 }
12344
12345 static void
12346 do_vfp_nsyn_mul (enum neon_shape rs)
12347 {
12348 if (rs == NS_FFF)
12349 do_vfp_nsyn_opcode ("fmuls");
12350 else
12351 do_vfp_nsyn_opcode ("fmuld");
12352 }
12353
12354 static void
12355 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12356 {
12357 int is_neg = (inst.instruction & 0x80) != 0;
12358 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12359
12360 if (rs == NS_FF)
12361 {
12362 if (is_neg)
12363 do_vfp_nsyn_opcode ("fnegs");
12364 else
12365 do_vfp_nsyn_opcode ("fabss");
12366 }
12367 else
12368 {
12369 if (is_neg)
12370 do_vfp_nsyn_opcode ("fnegd");
12371 else
12372 do_vfp_nsyn_opcode ("fabsd");
12373 }
12374 }
12375
12376 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12377 insns belong to Neon, and are handled elsewhere. */
12378
12379 static void
12380 do_vfp_nsyn_ldm_stm (int is_dbmode)
12381 {
12382 int is_ldm = (inst.instruction & (1 << 20)) != 0;
12383 if (is_ldm)
12384 {
12385 if (is_dbmode)
12386 do_vfp_nsyn_opcode ("fldmdbs");
12387 else
12388 do_vfp_nsyn_opcode ("fldmias");
12389 }
12390 else
12391 {
12392 if (is_dbmode)
12393 do_vfp_nsyn_opcode ("fstmdbs");
12394 else
12395 do_vfp_nsyn_opcode ("fstmias");
12396 }
12397 }
12398
12399 static void
12400 do_vfp_nsyn_sqrt (void)
12401 {
12402 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12403 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12404
12405 if (rs == NS_FF)
12406 do_vfp_nsyn_opcode ("fsqrts");
12407 else
12408 do_vfp_nsyn_opcode ("fsqrtd");
12409 }
12410
12411 static void
12412 do_vfp_nsyn_div (void)
12413 {
12414 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12415 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12416 N_F32 | N_F64 | N_KEY | N_VFP);
12417
12418 if (rs == NS_FFF)
12419 do_vfp_nsyn_opcode ("fdivs");
12420 else
12421 do_vfp_nsyn_opcode ("fdivd");
12422 }
12423
12424 static void
12425 do_vfp_nsyn_nmul (void)
12426 {
12427 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12428 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12429 N_F32 | N_F64 | N_KEY | N_VFP);
12430
12431 if (rs == NS_FFF)
12432 {
12433 NEON_ENCODE (SINGLE, inst);
12434 do_vfp_sp_dyadic ();
12435 }
12436 else
12437 {
12438 NEON_ENCODE (DOUBLE, inst);
12439 do_vfp_dp_rd_rn_rm ();
12440 }
12441 do_vfp_cond_or_thumb ();
12442 }
12443
/* Encode a Neon-syntax VCMP/VCMPE.  The register-register form maps
   directly; the compare-with-immediate form (the immediate must be zero,
   checked elsewhere) is re-pointed at the vcmpz/vcmpez table rows by
   adjusting the mnemonic index still held in inst.instruction.  */
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Switch the mnemonic index to the compare-with-zero variant.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
12493
12494 static void
12495 nsyn_insert_sp (void)
12496 {
12497 inst.operands[1] = inst.operands[0];
12498 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12499 inst.operands[0].reg = REG_SP;
12500 inst.operands[0].isreg = 1;
12501 inst.operands[0].writeback = 1;
12502 inst.operands[0].present = 1;
12503 }
12504
12505 static void
12506 do_vfp_nsyn_push (void)
12507 {
12508 nsyn_insert_sp ();
12509 if (inst.operands[1].issingle)
12510 do_vfp_nsyn_opcode ("fstmdbs");
12511 else
12512 do_vfp_nsyn_opcode ("fstmdbd");
12513 }
12514
12515 static void
12516 do_vfp_nsyn_pop (void)
12517 {
12518 nsyn_insert_sp ();
12519 if (inst.operands[1].issingle)
12520 do_vfp_nsyn_opcode ("fldmias");
12521 else
12522 do_vfp_nsyn_opcode ("fldmiad");
12523 }
12524
12525 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12526 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12527
12528 static void
12529 neon_dp_fixup (struct arm_it* insn)
12530 {
12531 unsigned int i = insn->instruction;
12532 insn->is_neon = 1;
12533
12534 if (thumb_mode)
12535 {
12536 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12537 if (i & (1 << 24))
12538 i |= 1 << 28;
12539
12540 i &= ~(1 << 24);
12541
12542 i |= 0xef000000;
12543 }
12544 else
12545 i |= 0xf2000000;
12546
12547 insn->instruction = i;
12548 }
12549
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  ffs returns the 1-based index of the least
   significant set bit, so this relies on X being a power of two;
   callers only pass validated element sizes.  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
12558
/* Split a Neon register number into its low four bits (register within
   a bank) and its high bit (the D/N/M bank-select bit).  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
12561
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;  /* Rn.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;    /* N.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);        /* Rm.  */
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;    /* M.  */
  inst.instruction |= (isquad != 0) << 6;                 /* Q.  */
  inst.instruction |= (ubit != 0) << 24;                  /* U.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
12586
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);        /* Rm.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
  inst.instruction |= (qbit != 0) << 6;                   /* Q.  */
  inst.instruction |= (ubit != 0) << 24;                  /* U.  */

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
12609
12610 /* Neon instruction encoders, in approximate order of appearance. */
12611
12612 static void
12613 do_neon_dyadic_i_su (void)
12614 {
12615 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12616 struct neon_type_el et = neon_check_type (3, rs,
12617 N_EQK, N_EQK, N_SU_32 | N_KEY);
12618 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12619 }
12620
12621 static void
12622 do_neon_dyadic_i64_su (void)
12623 {
12624 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12625 struct neon_type_el et = neon_check_type (3, rs,
12626 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12627 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12628 }
12629
/* Encode an immediate-shift instruction.  ET is the (already checked)
   element type and IMMBITS the raw value for the immediate field.
   When WRITE_UBIT is set, UVAL supplies the U bit; otherwise bit 24
   is left alone.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* The size in bytes is split between bit 7 and bits 21:19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
12648
12649 static void
12650 do_neon_shl_imm (void)
12651 {
12652 if (!inst.operands[2].isreg)
12653 {
12654 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12655 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
12656 NEON_ENCODE (IMMED, inst);
12657 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
12658 }
12659 else
12660 {
12661 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12662 struct neon_type_el et = neon_check_type (3, rs,
12663 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12664 unsigned int tmp;
12665
12666 /* VSHL/VQSHL 3-register variants have syntax such as:
12667 vshl.xx Dd, Dm, Dn
12668 whereas other 3-register operations encoded by neon_three_same have
12669 syntax like:
12670 vadd.xx Dd, Dn, Dm
12671 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12672 here. */
12673 tmp = inst.operands[2].reg;
12674 inst.operands[2].reg = inst.operands[1].reg;
12675 inst.operands[1].reg = tmp;
12676 NEON_ENCODE (INTEGER, inst);
12677 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12678 }
12679 }
12680
12681 static void
12682 do_neon_qshl_imm (void)
12683 {
12684 if (!inst.operands[2].isreg)
12685 {
12686 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12687 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12688
12689 NEON_ENCODE (IMMED, inst);
12690 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12691 inst.operands[2].imm);
12692 }
12693 else
12694 {
12695 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12696 struct neon_type_el et = neon_check_type (3, rs,
12697 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12698 unsigned int tmp;
12699
12700 /* See note in do_neon_shl_imm. */
12701 tmp = inst.operands[2].reg;
12702 inst.operands[2].reg = inst.operands[1].reg;
12703 inst.operands[1].reg = tmp;
12704 NEON_ENCODE (INTEGER, inst);
12705 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12706 }
12707 }
12708
12709 static void
12710 do_neon_rshl (void)
12711 {
12712 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12713 struct neon_type_el et = neon_check_type (3, rs,
12714 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12715 unsigned int tmp;
12716
12717 tmp = inst.operands[2].reg;
12718 inst.operands[2].reg = inst.operands[1].reg;
12719 inst.operands[1].reg = tmp;
12720 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12721 }
12722
/* Choose the cmode field for a logic-immediate (VBIC/VORR-style)
   instruction with element size SIZE, storing the 8-bit payload in
   *IMMBITS.  Returns the cmode value, or FAIL (after recording an
   error) if IMMEDIATE is not representable.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: a single byte in any of the four byte
	 positions selects cmode 1/3/5/7.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit pattern if the two halves repeat;
	 otherwise the value cannot be encoded.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element (or halved 32-bit value): byte in either half
     selects cmode 9/b.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
12778
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. each of the four bytes is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }
  return 1;
}
12790
/* For an immediate of the above all-0x00-or-0xff-bytes form, collapse
   the low bit of each byte into one bit and return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1) << byte;

  return squashed;
}
12799
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit plus the seven bits starting at bit 19 of the 32-bit
   float image.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
12807
12808 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12809 the instruction. *OP is passed as the initial value of the op field, and
12810 may be set to a different value depending on the constant (i.e.
12811 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12812 MVN). If the immediate looks like a repeated pattern then also
12813 try smaller element sizes. */
12814
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit value whose bytes are all 0x00 or 0xff: the MOV I64
	 form, which (unusually) uses OP = 1 despite being MOV not
	 MVN.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only usable if the two 32-bit halves repeat.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A byte in one of the four positions, or a byte with trailing
	 ones (cmode 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a smaller element size if the value is a repeated 16-bit
	 pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try 8-bit elements if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
12917
12918 /* Write immediate bits [7:0] to the following locations:
12919
12920 |28/24|23 19|18 16|15 4|3 0|
12921 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12922
12923 This function is used by VMOV/VMVN/VORR/VBIC. */
12924
static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;		    /* e f g h -> bits 3:0.  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16; /* b c d -> bits 18:16.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << 24; /* a -> bit 28/24.  */
}
12932
/* Invert the low-order SIZE bits of the 64-bit value XHI:XLO.  Either
   pointer may be NULL, in which case that half is treated as zero and
   not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
12969
12970 static void
12971 do_neon_logic (void)
12972 {
12973 if (inst.operands[2].present && inst.operands[2].isreg)
12974 {
12975 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12976 neon_check_type (3, rs, N_IGNORE_TYPE);
12977 /* U bit and size field were set as part of the bitmask. */
12978 NEON_ENCODE (INTEGER, inst);
12979 neon_three_same (neon_quad (rs), 0, -1);
12980 }
12981 else
12982 {
12983 const int three_ops_form = (inst.operands[2].present
12984 && !inst.operands[2].isreg);
12985 const int immoperand = (three_ops_form ? 2 : 1);
12986 enum neon_shape rs = (three_ops_form
12987 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
12988 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
12989 struct neon_type_el et = neon_check_type (2, rs,
12990 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12991 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
12992 unsigned immbits;
12993 int cmode;
12994
12995 if (et.type == NT_invtype)
12996 return;
12997
12998 if (three_ops_form)
12999 constraint (inst.operands[0].reg != inst.operands[1].reg,
13000 _("first and second operands shall be the same register"));
13001
13002 NEON_ENCODE (IMMED, inst);
13003
13004 immbits = inst.operands[immoperand].imm;
13005 if (et.size == 64)
13006 {
13007 /* .i64 is a pseudo-op, so the immediate must be a repeating
13008 pattern. */
13009 if (immbits != (inst.operands[immoperand].regisimm ?
13010 inst.operands[immoperand].reg : 0))
13011 {
13012 /* Set immbits to an invalid constant. */
13013 immbits = 0xdeadbeef;
13014 }
13015 }
13016
13017 switch (opcode)
13018 {
13019 case N_MNEM_vbic:
13020 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13021 break;
13022
13023 case N_MNEM_vorr:
13024 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13025 break;
13026
13027 case N_MNEM_vand:
13028 /* Pseudo-instruction for VBIC. */
13029 neon_invert_size (&immbits, 0, et.size);
13030 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13031 break;
13032
13033 case N_MNEM_vorn:
13034 /* Pseudo-instruction for VORR. */
13035 neon_invert_size (&immbits, 0, et.size);
13036 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13037 break;
13038
13039 default:
13040 abort ();
13041 }
13042
13043 if (cmode == FAIL)
13044 return;
13045
13046 inst.instruction |= neon_quad (rs) << 6;
13047 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13048 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13049 inst.instruction |= cmode << 8;
13050 neon_write_immbits (immbits);
13051
13052 neon_dp_fixup (&inst);
13053 }
13054 }
13055
13056 static void
13057 do_neon_bitfield (void)
13058 {
13059 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13060 neon_check_type (3, rs, N_IGNORE_TYPE);
13061 neon_three_same (neon_quad (rs), 0, -1);
13062 }
13063
13064 static void
13065 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13066 unsigned destbits)
13067 {
13068 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13069 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13070 types | N_KEY);
13071 if (et.type == NT_float)
13072 {
13073 NEON_ENCODE (FLOAT, inst);
13074 neon_three_same (neon_quad (rs), 0, -1);
13075 }
13076 else
13077 {
13078 NEON_ENCODE (INTEGER, inst);
13079 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13080 }
13081 }
13082
/* Dyadic operation accepting signed/unsigned/float 32-bit element
   types; unsigned types set the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13088
/* D-register-only variant of do_neon_dyadic_if_su.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13096
/* Dyadic operation accepting integer or float 32-bit element types;
   the "_d" suffix indicates the D-register-only variant (enforced
   during operand parsing, as with do_neon_dyadic_if_su_d).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13104
/* Flag bits for vfp_or_neon_is_neon, selecting which checks and
   fix-ups to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/fix up the condition code field.  */
  NEON_CHECK_ARCH = 2	/* Require the Neon extension in the CPU.  */
};
13110
13111 /* Call this function if an instruction which may have belonged to the VFP or
13112 Neon instruction sets, but turned out to be a Neon instruction (due to the
13113 operand types involved, etc.). We have to check and/or fix-up a couple of
13114 things:
13115
13116 - Make sure the user hasn't attempted to make a Neon instruction
13117 conditional.
13118 - Alter the value in the condition code field if necessary.
13119 - Make sure that the arch supports Neon instructions.
13120
13121 Which of these operations take place depends on bits from enum
13122 vfp_or_neon_is_neon_bits.
13123
13124 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13125 current instruction's condition is COND_ALWAYS, the condition field is
13126 changed to inst.uncond_value. This is necessary because instructions shared
13127 between VFP and Neon may be conditional for the VFP variants only, and the
13128 unconditional Neon version must have, e.g., 0xF in the condition field. */
13129
13130 static int
13131 vfp_or_neon_is_neon (unsigned check)
13132 {
13133 /* Conditions are always legal in Thumb mode (IT blocks). */
13134 if (!thumb_mode && (check & NEON_CHECK_CC))
13135 {
13136 if (inst.cond != COND_ALWAYS)
13137 {
13138 first_error (_(BAD_COND));
13139 return FAIL;
13140 }
13141 if (inst.uncond_value != -1)
13142 inst.instruction |= inst.uncond_value << 28;
13143 }
13144
13145 if ((check & NEON_CHECK_ARCH)
13146 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13147 {
13148 first_error (_(BAD_FPU));
13149 return FAIL;
13150 }
13151
13152 return SUCCESS;
13153 }
13154
13155 static void
13156 do_neon_addsub_if_i (void)
13157 {
13158 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13159 return;
13160
13161 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13162 return;
13163
13164 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13165 affected if we specify unsigned args. */
13166 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13167 }
13168
13169 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13170 result to be:
13171 V<op> A,B (A is operand 0, B is operand 2)
13172 to mean:
13173 V<op> A,B,A
13174 not:
13175 V<op> A,B,B
13176 so handle that case specially. */
13177
13178 static void
13179 neon_exchange_operands (void)
13180 {
13181 void *scratch = alloca (sizeof (inst.operands[0]));
13182 if (inst.operands[1].present)
13183 {
13184 /* Swap operands[1] and operands[2]. */
13185 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13186 inst.operands[1] = inst.operands[2];
13187 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13188 }
13189 else
13190 {
13191 inst.operands[1] = inst.operands[2];
13192 inst.operands[2] = inst.operands[0];
13193 }
13194 }
13195
/* Encode a compare: three-register form via the generic dyadic path
   (with operands exchanged first when INVERT is set), or the
   compare-against-immediate form.  REGTYPES/IMMTYPES are the type
   sets permitted for each variant.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;  /* F bit.  */
      inst.instruction |= neon_logbits (et.size) << 18; /* Size.  */

      neon_dp_fixup (&inst);
    }
}
13223
/* Non-inverted compare: S/U/F types for the register form; signed or
   float types for the compare-with-immediate form.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
13229
/* Inverted compare: same types as do_neon_cmp, but with the source
   operands exchanged (see neon_compare).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
13235
/* Equality compare: integer or float 32-bit types, no operand
   inversion.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
13241
13242 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13243 scalars, which are encoded in 5 bits, M : Rm.
13244 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13245 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13246 index in M. */
13247
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalar: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalar: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
13273
13274 /* Encode multiply / multiply-accumulate scalar instructions. */
13275
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Re-encode the scalar operand into the 5-bit M:Rm form described
     above.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;  /* Rn.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;    /* N.  */
  inst.instruction |= LOW4 (scalar);                      /* Rm.  */
  inst.instruction |= HI1 (scalar) << 5;                  /* M.  */
  inst.instruction |= (et.type == NT_float) << 8;         /* F bit.  */
  inst.instruction |= neon_logbits (et.size) << 20;       /* Size.  */
  inst.instruction |= (ubit != 0) << 24;                  /* U.  */

  neon_dp_fixup (&inst);
}
13298
13299 static void
13300 do_neon_mac_maybe_scalar (void)
13301 {
13302 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13303 return;
13304
13305 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13306 return;
13307
13308 if (inst.operands[2].isscalar)
13309 {
13310 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13311 struct neon_type_el et = neon_check_type (3, rs,
13312 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13313 NEON_ENCODE (SCALAR, inst);
13314 neon_mul_mac (et, neon_quad (rs));
13315 }
13316 else
13317 {
13318 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13319 affected if we specify unsigned args. */
13320 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13321 }
13322 }
13323
13324 static void
13325 do_neon_fmac (void)
13326 {
13327 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
13328 return;
13329
13330 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13331 return;
13332
13333 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13334 }
13335
13336 static void
13337 do_neon_tst (void)
13338 {
13339 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13340 struct neon_type_el et = neon_check_type (3, rs,
13341 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13342 neon_three_same (neon_quad (rs), 0, et.size);
13343 }
13344
13345 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13346 same types as the MAC equivalents. The polynomial type for this instruction
13347 is encoded the same as the integer type. */
13348
13349 static void
13350 do_neon_mul (void)
13351 {
13352 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13353 return;
13354
13355 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13356 return;
13357
13358 if (inst.operands[2].isscalar)
13359 do_neon_mac_maybe_scalar ();
13360 else
13361 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13362 }
13363
13364 static void
13365 do_neon_qdmulh (void)
13366 {
13367 if (inst.operands[2].isscalar)
13368 {
13369 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13370 struct neon_type_el et = neon_check_type (3, rs,
13371 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13372 NEON_ENCODE (SCALAR, inst);
13373 neon_mul_mac (et, neon_quad (rs));
13374 }
13375 else
13376 {
13377 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13378 struct neon_type_el et = neon_check_type (3, rs,
13379 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13380 NEON_ENCODE (INTEGER, inst);
13381 /* The U bit (rounding) comes from bit mask. */
13382 neon_three_same (neon_quad (rs), 0, et.size);
13383 }
13384 }
13385
/* Absolute floating-point compare, F32 only; the U bit is set and the
   size field comes from the opcode bitmask.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
13394
/* Inverted absolute compare: the same encoding with the source
   operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13401
/* Reciprocal/square-root step operations: F32 only, U bit clear,
   size from the opcode bitmask.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
13409
/* VABS/VNEG: try the VFP encoding first; otherwise encode the Neon
   two-register form for signed integer or F32 element types.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;  /* F bit.  */
  inst.instruction |= neon_logbits (et.size) << 18; /* Size.  */

  neon_dp_fixup (&inst);
}
13435
13436 static void
13437 do_neon_sli (void)
13438 {
13439 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13440 struct neon_type_el et = neon_check_type (2, rs,
13441 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13442 int imm = inst.operands[2].imm;
13443 constraint (imm < 0 || (unsigned)imm >= et.size,
13444 _("immediate out of range for insert"));
13445 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
13446 }
13447
13448 static void
13449 do_neon_sri (void)
13450 {
13451 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13452 struct neon_type_el et = neon_check_type (2, rs,
13453 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13454 int imm = inst.operands[2].imm;
13455 constraint (imm < 1 || (unsigned)imm > et.size,
13456 _("immediate out of range for insert"));
13457 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
13458 }
13459
/* Signed-to-unsigned saturating shift left by immediate.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
13476
13477 static void
13478 do_neon_qmovn (void)
13479 {
13480 struct neon_type_el et = neon_check_type (2, NS_DQ,
13481 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
13482 /* Saturating move where operands can be signed or unsigned, and the
13483 destination has the same signedness. */
13484 NEON_ENCODE (INTEGER, inst);
13485 if (et.type == NT_unsigned)
13486 inst.instruction |= 0xc0;
13487 else
13488 inst.instruction |= 0x80;
13489 neon_two_same (0, 1, et.size / 2);
13490 }
13491
/* Saturating narrowing move producing unsigned results from signed
   operands.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
13501
/* Saturating shift right and narrow; a zero shift count is assembled
   as the equivalent VQMOVN pseudo-instruction.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
13528
/* Saturating shift right and narrow with unsigned result from signed
   operands; a zero shift count is assembled as VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
13558
/* Narrowing move (no saturation): Q source, D destination, half-size
   elements.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
13567
/* Shift right and narrow; a zero shift count is assembled as the
   equivalent VMOVN pseudo-instruction.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
13592
/* Encode VSHLL (shift left long by immediate): <Qd>, <Dm>, #<imm>.
   The maximum-shift form (imm == element size) has a distinct encoding
   from the general immediate form.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  Encoded as a two-register misc
	 instruction with the size in bits 18-19.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions. */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
13622
13623 /* Check the various types for the VCVT instruction, and return which version
13624 the current instruction is. */
13625
static int
neon_cvt_flavour (enum neon_shape rs)
{
  /* Probe each destination/source type pair against the parsed
     instruction; the first pair that type-checks determines the flavour
     number returned.  CVT_VAR clears inst.error on success so a failed
     probe does not poison the caller's state.  Returns -1 when no pair
     matches.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Flavours 0-3: integer <-> single-precision.  */
  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);
  /* Half-precision conversions. */
  CVT_VAR (4, N_F32, N_F16);
  CVT_VAR (5, N_F16, N_F32);

  /* Everything from here on is VFP-register based.  */
  whole_reg = N_VFP;

  /* VFP instructions. */
  CVT_VAR (6, N_F32, N_F64);
  CVT_VAR (7, N_F64, N_F32);
  CVT_VAR (8, N_S32, N_F64 | key);
  CVT_VAR (9, N_U32, N_F64 | key);
  CVT_VAR (10, N_F64 | key, N_S32);
  CVT_VAR (11, N_F64 | key, N_U32);
  /* VFP instructions with bitshift. */
  CVT_VAR (12, N_F32 | key, N_S16);
  CVT_VAR (13, N_F32 | key, N_U16);
  CVT_VAR (14, N_F64 | key, N_S16);
  CVT_VAR (15, N_F64 | key, N_U16);
  CVT_VAR (16, N_S16, N_F32 | key);
  CVT_VAR (17, N_U16, N_F32 | key);
  CVT_VAR (18, N_S16, N_F64 | key);
  CVT_VAR (19, N_U16, N_F64 | key);

  return -1;
#undef CVT_VAR
}
13675
13676 /* Neon-syntax VFP conversions. */
13677
13678 static void
13679 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13680 {
13681 const char *opname = 0;
13682
13683 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13684 {
13685 /* Conversions with immediate bitshift. */
13686 const char *enc[] =
13687 {
13688 "ftosls",
13689 "ftouls",
13690 "fsltos",
13691 "fultos",
13692 NULL,
13693 NULL,
13694 NULL,
13695 NULL,
13696 "ftosld",
13697 "ftould",
13698 "fsltod",
13699 "fultod",
13700 "fshtos",
13701 "fuhtos",
13702 "fshtod",
13703 "fuhtod",
13704 "ftoshs",
13705 "ftouhs",
13706 "ftoshd",
13707 "ftouhd"
13708 };
13709
13710 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13711 {
13712 opname = enc[flavour];
13713 constraint (inst.operands[0].reg != inst.operands[1].reg,
13714 _("operands 0 and 1 must be the same register"));
13715 inst.operands[1] = inst.operands[2];
13716 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13717 }
13718 }
13719 else
13720 {
13721 /* Conversions without bitshift. */
13722 const char *enc[] =
13723 {
13724 "ftosis",
13725 "ftouis",
13726 "fsitos",
13727 "fuitos",
13728 "NULL",
13729 "NULL",
13730 "fcvtsd",
13731 "fcvtds",
13732 "ftosid",
13733 "ftouid",
13734 "fsitod",
13735 "fuitod"
13736 };
13737
13738 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13739 opname = enc[flavour];
13740 }
13741
13742 if (opname)
13743 do_vfp_nsyn_opcode (opname);
13744 }
13745
13746 static void
13747 do_vfp_nsyn_cvtz (void)
13748 {
13749 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
13750 int flavour = neon_cvt_flavour (rs);
13751 const char *enc[] =
13752 {
13753 "ftosizs",
13754 "ftouizs",
13755 NULL,
13756 NULL,
13757 NULL,
13758 NULL,
13759 NULL,
13760 NULL,
13761 "ftosizd",
13762 "ftouizd"
13763 };
13764
13765 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
13766 do_vfp_nsyn_opcode (enc[flavour]);
13767 }
13768
static void
do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
{
  /* Common VCVT worker: called with ROUND_TO_ZERO set for VCVT and clear
     for VCVTR.  Dispatches on operand shape between fixed-point Neon,
     integer Neon, half-precision Neon and plain VFP encodings.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (round_to_zero
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= 6)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Fixed-point conversion with immediate fraction bits.  */
      {
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != -1)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      /* Integer <-> float conversion.  */
    int_encode:
      {
	unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	NEON_ENCODE (INTEGER, inst);

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (flavour != -1)
	  inst.instruction |= enctab[flavour];

	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 2 << 18;

	neon_dp_fixup (&inst);
      }
    break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
13884
static void
do_neon_cvtr (void)
{
  /* VCVTR: run the common VCVT worker with round_to_zero clear, so the
     PR11109 round-to-zero special case in do_neon_cvt_1 is skipped.  */
  do_neon_cvt_1 (FALSE);
}
13890
static void
do_neon_cvt (void)
{
  /* VCVT: run the common worker with round_to_zero set (see PR11109
     handling in do_neon_cvt_1).  */
  do_neon_cvt_1 (TRUE);
}
13896
static void
do_neon_cvtb (void)
{
  /* Encode VCVTB (VFP half-precision <-> single conversion, bottom half).
     Bit 16 is set when a 16-bit size is given, whether the size came
     from the mnemonic suffix or from the operands.  */
  inst.instruction = 0xeb20a40;

  /* The sizes are attached to the mnemonic.  */
  if (inst.vectype.el[0].type != NT_invtype
      && inst.vectype.el[0].size == 16)
    inst.instruction |= 0x00010000;

  /* Programmer's syntax: the sizes are attached to the operands.  */
  else if (inst.operands[0].vectype.type != NT_invtype
	   && inst.operands[0].vectype.size == 16)
    inst.instruction |= 0x00010000;

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
  do_vfp_cond_or_thumb ();
}
13916
13917
static void
do_neon_cvtt (void)
{
  /* VCVTT is the VCVTB encoding with bit 7 (0x80) additionally set.  */
  do_neon_cvtb ();
  inst.instruction |= 0x80;
}
13924
static void
neon_move_immediate (void)
{
  /* Encode the immediate forms of VMOV/VMVN.  Tries the immediate as
     given first; if no cmode encodes it, inverts the immediate and flips
     VMOV <-> VMVN before giving up.  */
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate is split across imm (low half) and reg (high
     half) when regisimm is set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Install the (possibly flipped) op bit and the chosen cmode.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
13976
13977 static void
13978 do_neon_mvn (void)
13979 {
13980 if (inst.operands[1].isreg)
13981 {
13982 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13983
13984 NEON_ENCODE (INTEGER, inst);
13985 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13986 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13987 inst.instruction |= LOW4 (inst.operands[1].reg);
13988 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13989 inst.instruction |= neon_quad (rs) << 6;
13990 }
13991 else
13992 {
13993 NEON_ENCODE (IMMED, inst);
13994 neon_move_immediate ();
13995 }
13996
13997 neon_dp_fixup (&inst);
13998 }
13999
14000 /* Encode instructions of form:
14001
14002 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14003 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
14004
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Common encoder for long/wide/narrow three-register operations, per
     the bit layout in the comment above: SIZE fills the size field
     (bits 20-21), ET's unsignedness the U bit (bit 24).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14019
static void
do_neon_dyadic_long (void)
{
  /* Long three-register operations: Qd, Dn, Dm with double-width
     destination elements.  */
  /* FIXME: Type checking for lengthening op. */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
14028
14029 static void
14030 do_neon_abal (void)
14031 {
14032 struct neon_type_el et = neon_check_type (3, NS_QDD,
14033 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14034 neon_mixed_length (et, et.size);
14035 }
14036
14037 static void
14038 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14039 {
14040 if (inst.operands[2].isscalar)
14041 {
14042 struct neon_type_el et = neon_check_type (3, NS_QDS,
14043 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14044 NEON_ENCODE (SCALAR, inst);
14045 neon_mul_mac (et, et.type == NT_unsigned);
14046 }
14047 else
14048 {
14049 struct neon_type_el et = neon_check_type (3, NS_QDD,
14050 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14051 NEON_ENCODE (INTEGER, inst);
14052 neon_mixed_length (et, et.size);
14053 }
14054 }
14055
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* The first argument lists the element types accepted when operand 2
     is a scalar (16/32-bit only); the second when it is a register.
     See neon_mac_reg_scalar_long.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
14061
14062 static void
14063 do_neon_dyadic_wide (void)
14064 {
14065 struct neon_type_el et = neon_check_type (3, NS_QQD,
14066 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14067 neon_mixed_length (et, et.size);
14068 }
14069
14070 static void
14071 do_neon_dyadic_narrow (void)
14072 {
14073 struct neon_type_el et = neon_check_type (3, NS_QDD,
14074 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14075 /* Operand sign is unimportant, and the U bit is part of the opcode,
14076 so force the operand type to integer. */
14077 et.type = NT_integer;
14078 neon_mixed_length (et, et.size / 2);
14079 }
14080
static void
do_neon_mul_sat_scalar_long (void)
{
  /* Saturating long multiply(-accumulate): signed 16/32-bit elements
     only, in both the scalar and register forms.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
14086
static void
do_neon_vmull (void)
{
  /* VMULL: by-scalar forms share the maybe-scalar long encoder; the
     register form additionally accepts polynomial (P8) elements.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
	 zero. Should be OK as-is. */
      neon_mixed_length (et, et.size);
    }
}
14105
static void
do_neon_ext (void)
{
  /* VEXT: extract a vector from a pair of vectors at an element offset.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element offset to a byte offset for the imm4 field.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte offset must fit in the register being extracted from
     (8 bytes for D, 16 for Q).  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
14127
static void
do_neon_rev (void)
{
  /* VREV16/VREV32/VREV64: the op field (bits 7-8 of the base opcode)
     selects the width of the region being reversed.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
14144
static void
do_neon_dup (void)
{
  /* VDUP: replicate either a scalar (Dm[x]) or an ARM register into
     every lane of a D or Q register.  */
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The scalar index and the element size share the imm4 field:
	 index in the high bits, a single size marker bit below it.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector. */
      NEON_ENCODE (ARMREG, inst);
      /* The element size is spread over the B (bit 22) and E (bit 5)
	 fields.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field. */
      do_vfp_cond_or_thumb ();
    }
}
14195
14196 /* VMOV has particularly many variations. It can be one of:
14197 0. VMOV<c><q> <Qd>, <Qm>
14198 1. VMOV<c><q> <Dd>, <Dm>
14199 (Register operations, which are VORR with Rm = Rn.)
14200 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14201 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14202 (Immediate loads.)
14203 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14204 (ARM register to scalar.)
14205 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14206 (Two ARM registers to vector.)
14207 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14208 (Scalar to ARM register.)
14209 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14210 (Vector to two ARM registers.)
14211 8. VMOV.F32 <Sd>, <Sm>
14212 9. VMOV.F64 <Dd>, <Dm>
14213 (VFP register moves.)
14214 10. VMOV.F32 <Sd>, #imm
14215 11. VMOV.F64 <Dd>, #imm
14216 (VFP float immediate load.)
14217 12. VMOV <Rd>, <Sm>
14218 (VFP single to ARM reg.)
14219 13. VMOV <Sd>, <Rm>
14220 (ARM reg to VFP single.)
14221 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14222 (Two ARM regs to two VFP singles.)
14223 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14224 (Two VFP singles to two ARM regs.)
14225
14226 These cases can be disambiguated using neon_select_shape, except cases 1/9
14227 and 3/11 which depend on the operand type too.
14228
14229 All the encoded bits are hardcoded by this function.
14230
14231 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14232 Cases 5, 7 may be used with VFPv2 and above.
14233
14234 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14235 can specify a type where it doesn't make sense to, and is ignored). */
14236
static void
do_neon_mov (void)
{
  /* Encode one of the many VMOV variants; see the numbered-case comment
     above this function.  The shape alone disambiguates all cases except
     1/9 and 3/11, which also need the operand type.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* .F64 type: this is really the VFP fcpyd form.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes Rm == Rn, so set both source fields to operand 1.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4: ARM register to scalar.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Scalar index and size share the opc1/opc2 fields.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6: scalar to ARM register.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* 8/16-bit transfers also encode the signedness of the scalar.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only immediates representable in the 8-bit "quarter float"
	 encoding are accepted.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
14446
static void
do_neon_rshift_round_imm (void)
{
  /* Encode a (rounding) shift right by immediate.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
14467
14468 static void
14469 do_neon_movl (void)
14470 {
14471 struct neon_type_el et = neon_check_type (2, NS_QD,
14472 N_EQK | N_DBL, N_SU_32 | N_KEY);
14473 unsigned sizebits = et.size >> 3;
14474 inst.instruction |= sizebits << 19;
14475 neon_two_same (0, et.type == NT_unsigned, -1);
14476 }
14477
14478 static void
14479 do_neon_trn (void)
14480 {
14481 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14482 struct neon_type_el et = neon_check_type (2, rs,
14483 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14484 NEON_ENCODE (INTEGER, inst);
14485 neon_two_same (neon_quad (rs), 1, et.size);
14486 }
14487
static void
do_neon_zip_uzp (void)
{
  /* VZIP/VUZP.  The 32-bit D-register form is not a distinct encoding;
     it is expressed as VTRN.32 instead.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
14503
14504 static void
14505 do_neon_sat_abs_neg (void)
14506 {
14507 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14508 struct neon_type_el et = neon_check_type (2, rs,
14509 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14510 neon_two_same (neon_quad (rs), 1, et.size);
14511 }
14512
14513 static void
14514 do_neon_pair_long (void)
14515 {
14516 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14517 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
14518 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
14519 inst.instruction |= (et.type == NT_unsigned) << 7;
14520 neon_two_same (neon_quad (rs), 1, et.size);
14521 }
14522
14523 static void
14524 do_neon_recip_est (void)
14525 {
14526 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14527 struct neon_type_el et = neon_check_type (2, rs,
14528 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
14529 inst.instruction |= (et.type == NT_float) << 8;
14530 neon_two_same (neon_quad (rs), 1, et.size);
14531 }
14532
14533 static void
14534 do_neon_cls (void)
14535 {
14536 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14537 struct neon_type_el et = neon_check_type (2, rs,
14538 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14539 neon_two_same (neon_quad (rs), 1, et.size);
14540 }
14541
14542 static void
14543 do_neon_clz (void)
14544 {
14545 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14546 struct neon_type_el et = neon_check_type (2, rs,
14547 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
14548 neon_two_same (neon_quad (rs), 1, et.size);
14549 }
14550
14551 static void
14552 do_neon_cnt (void)
14553 {
14554 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14555 struct neon_type_el et = neon_check_type (2, rs,
14556 N_EQK | N_INT, N_8 | N_KEY);
14557 neon_two_same (neon_quad (rs), 1, et.size);
14558 }
14559
14560 static void
14561 do_neon_swp (void)
14562 {
14563 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14564 neon_two_same (neon_quad (rs), 1, -1);
14565 }
14566
static void
do_neon_tbl_tbx (void)
{
  /* VTBL/VTBX: table lookup from a list of 1-4 D registers.  The list
     length minus one is encoded in bits 8-9.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
14590
/* Encode VLDM/VSTM.  Operand 0 is the base register (with optional
   writeback), operand 1 the register list; single-precision lists are
   delegated to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each double-precision register transfers two words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
14620
14621 static void
14622 do_neon_ldr_str (void)
14623 {
14624 int is_ldr = (inst.instruction & (1 << 20)) != 0;
14625
14626 if (inst.operands[0].issingle)
14627 {
14628 if (is_ldr)
14629 do_vfp_nsyn_opcode ("flds");
14630 else
14631 do_vfp_nsyn_opcode ("fsts");
14632 }
14633 else
14634 {
14635 if (is_ldr)
14636 do_vfp_nsyn_opcode ("fldd");
14637 else
14638 do_vfp_nsyn_opcode ("fstd");
14639 }
14640 }
14641
14642 /* "interleave" version also handles non-interleaving register VLD1/VST1
14643 instructions. */
14644
/* Encode the "interleave" (multiple-structure) forms of VLD<n>/VST<n>,
   including non-interleaving register VLD1/VST1.  Validates the optional
   @<align> qualifier against the register-list length and fills in the
   alignment, size and "type" fields of the instruction.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate an explicit alignment (imm holds align << 8) into the
     two-bit field; which alignments are legal depends on list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
14708
14709 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14710 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14711 otherwise. The variable arguments are a list of pairs of legal (size, align)
14712 values, terminated with -1. */
14713
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No ":<align>" qualifier on the operand: nothing to check, and no
     alignment bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Scan the variadic (size, align) pairs for one matching the
     operands; the list is terminated by a lone -1.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
14749
/* Encode the single-lane ("to/from one lane") forms of VLD<n>/VST<n>.
   Validates list length, lane index and register stride, then encodes
   the per-<n> alignment field plus the lane number and size bits.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a 64-bit D register at this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  /* Each <n> accepts a different set of (size, align) pairs and encodes
     the chosen alignment differently; see neon_alignment_bit.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
14834
14835 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14836
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one; each <n>
     has its own alignment and list-length rules.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements with 128-bit alignment use a special size
           encoding (0x3) rather than log2(size).  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment-present bit, as determined by neon_alignment_bit above.  */
  inst.instruction |= do_align << 4;
}
14908
14909 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
14911
static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  /* Dispatch on the lane specifier of the register list to pick the
     interleave / all-lanes-dup / single-lane encoder.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Addressing-mode (Rm) field: a post-index register, 0xd for plain
     writeback, or 0xf for no writeback.  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  /* Top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
14961 \f
14962 /* Overall per-instruction processing. */
14963
14964 /* We need to be able to fix up arbitrary expressions in some statements.
14965 This is so that we can handle symbols that are an arbitrary distance from
14966 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14967 which returns part of an address in a form which will be valid for
14968 a data instruction. We do this by pushing the expression into a symbol
14969 in the expr_section, and creating a fix for that. */
14970
14971 static void
14972 fix_new_arm (fragS * frag,
14973 int where,
14974 short int size,
14975 expressionS * exp,
14976 int pc_rel,
14977 int reloc)
14978 {
14979 fixS * new_fix;
14980
14981 switch (exp->X_op)
14982 {
14983 case O_constant:
14984 case O_symbol:
14985 case O_add:
14986 case O_subtract:
14987 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
14988 (enum bfd_reloc_code_real) reloc);
14989 break;
14990
14991 default:
14992 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
14993 pc_rel, (enum bfd_reloc_code_real) reloc);
14994 break;
14995 }
14996
14997 /* Mark whether the fix is to a THUMB instruction, or an ARM
14998 instruction. */
14999 new_fix->tc_fix_data = thumb_mode;
15000 }
15001
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Express the relaxation target as (symbol, offset) where possible;
     arbitrary expressions are wrapped in an expression symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
15033
/* Write a 32-bit thumb instruction to buf.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  /* Thumb-2 instructions are stored as two consecutive halfwords,
     most-significant halfword first.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
15041
/* Emit the instruction currently described by INST into the output:
   report any accumulated error (STR is the source line, used only for
   diagnostics), handle relaxable instructions, write the encoding,
   create any pending fixup and emit debug info.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Size zero means nothing to emit (e.g. a pseudo handled elsewhere).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb-2: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM encoding: the same word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
15088
15089 static char *
15090 output_it_inst (int cond, int mask, char * to)
15091 {
15092 unsigned long instruction = 0xbf00;
15093
15094 mask &= 0xf;
15095 instruction |= mask;
15096 instruction |= cond << 4;
15097
15098 if (to == NULL)
15099 {
15100 to = frag_more (2);
15101 #ifdef OBJ_ELF
15102 dwarf2_emit_insn (2);
15103 #endif
15104 }
15105
15106 md_number_to_chars (to, instruction, 2);
15107
15108 return to;
15109 }
15110
/* Tag values used in struct asm_opcode's tag field.  They describe
   where (if anywhere) a conditional affix may appear in the mnemonic,
   and are consulted by opcode_lookup to validate the affix position
   it found.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
15144
15145 /* Subroutine of md_assemble, responsible for looking up the primary
15146 opcode from the mnemonic the user wrote. STR points to the
15147 beginning of the mnemonic.
15148
15149 This is not simply a hash table lookup, because of conditional
15150 variants. Most instructions have conditional variants, which are
15151 expressed with a _conditional affix_ to the mnemonic. If we were
15152 to encode each conditional variant as a literal string in the opcode
15153 table, it would have approximately 20,000 entries.
15154
15155 Most mnemonics take this affix as a suffix, and in unified syntax,
15156 'most' is upgraded to 'all'. However, in the divided syntax, some
15157 instructions take the affix as an infix, notably the s-variants of
15158 the arithmetic instructions. Of those instructions, all but six
15159 have the infix appear after the third character of the mnemonic.
15160
15161 Accordingly, the algorithm for looking up primary opcodes given
15162 an identifier is:
15163
15164 1. Look up the identifier in the opcode table.
15165 If we find a match, go to step U.
15166
15167 2. Look up the last two characters of the identifier in the
15168 conditions table. If we find a match, look up the first N-2
15169 characters of the identifier in the opcode table. If we
15170 find a match, go to step CE.
15171
15172 3. Look up the fourth and fifth characters of the identifier in
15173 the conditions table. If we find a match, extract those
15174 characters from the identifier, and look up the remaining
15175 characters in the opcode table. If we find a match, go
15176 to step CM.
15177
15178 4. Fail.
15179
15180 U. Examine the tag field of the opcode structure, in case this is
15181 one of the six instructions with its conditional infix in an
15182 unusual place. If it is, the tag tells us where to find the
15183 infix; look it up in the conditions table and set inst.cond
15184 accordingly. Otherwise, this is an unconditional instruction.
15185 Again set inst.cond accordingly. Return the opcode structure.
15186
15187 CE. Examine the tag field to make sure this is an instruction that
15188 should receive a conditional suffix. If it is not, fail.
15189 Otherwise, set inst.cond from the suffix we already looked up,
15190 and return the opcode structure.
15191
15192 CM. Examine the tag field to make sure this is an instruction that
15193 should receive a conditional infix after the third character.
15194 If it is not, fail. Otherwise, undo the edits to the current
15195 line of input and proceed as for case CE. */
15196
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* Tags >= OT_odd_infix_0 encode the infix position directly;
	 extract the condition from there.  */
      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters: the two-character condition plus at least one character
     of base mnemonic.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  Temporarily
     remove the two infix characters, look up the shortened mnemonic,
     then restore the input line.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
15353
15354 /* This function generates an initial IT instruction, leaving its block
15355 virtually open for the new instructions. Eventually,
15356 the mask will be updated by now_it_add_mask () each time
15357 a new instruction needs to be included in the IT block.
15358 Finally, the block is closed with close_automatic_it_block ().
15359 The block closure can be requested either from md_assemble (),
15360 a tencode (), or due to a label hook. */
15361
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; refined by
     now_it_add_mask () as further instructions are appended.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction went so its mask can be
     rewritten in place later.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
}
15372
15373 /* Close an automatic IT block.
15374 See comments in new_automatic_it_block (). */
15375
static void
close_automatic_it_block (void)
{
  /* mask == 0x10 is the "block finished" sentinel that
     it_fsm_post_encode () tests for.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
15382
15383 /* Update the mask of the current automatically-generated IT
15384 instruction. See comments in new_automatic_it_block (). */
15385
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					  | ((bitvalue) << (nbit)))
  /* The low bit of the condition selects the then/else sense encoded
     in this slot of the IT mask.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Write this instruction's slot ...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ... and move the terminating one-bit down a position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite, in place, the IT instruction emitted when the block was
     opened.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
15406
15407 /* The IT blocks handling machinery is accessed through the these functions:
15408 it_fsm_pre_encode () from md_assemble ()
15409 set_it_insn_type () optional, from the tencode functions
15410 set_it_insn_type_last () ditto
15411 in_it_block () ditto
15412 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
15414
15415 Rationale:
15416 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15417 initializing the IT insn type with a generic initial value depending
15418 on the inst.condition.
15419 2) During the tencode function, two things may happen:
15420 a) The tencode function overrides the IT insn type by
15421 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15422 b) The tencode function queries the IT block state by
15423 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15424
15425 Both set_it_insn_type and in_it_block run the internal FSM state
15426 handling function (handle_it_state), because: a) setting the IT insn
15427 type may incur in an invalid state (exiting the function),
15428 and b) querying the state requires the FSM to be updated.
15429 Specifically we want to avoid creating an IT block for conditional
15430 branches, so it_fsm_pre_encode is actually a guess and we can't
15431 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
15433 Because of this, if set_it_insn_type and in_it_block have to be used,
15434 set_it_insn_type has to be called first.
15435
15436 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15437 determines the insn IT type depending on the inst.cond code.
15438 When a tencode () routine encodes an instruction that can be
15439 either outside an IT block, or, in the case of being inside, has to be
15440 the last one, set_it_insn_type_last () will determine the proper
15441 IT instruction type based on the inst.cond code. Otherwise,
15442 set_it_insn_type can be called for overriding that logic or
15443 for covering other cases.
15444
15445 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15447 still queried. Instead, if the FSM determines that the state should
15448 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15449 after the tencode () function: that's what it_fsm_post_encode () does.
15450
15451 Since in_it_block () calls the state handling function to get an
15452 updated state, an error may occur (due to invalid insns combination).
15453 In that case, inst.error is set.
15454 Therefore, inst.error has to be checked after the execution of
15455 the tencode () routine.
15456
15457 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15458 any pending state change (if any) that didn't take place in
15459 handle_it_state () as explained above. */
15460
15461 static void
15462 it_fsm_pre_encode (void)
15463 {
15464 if (inst.cond != COND_ALWAYS)
15465 inst.it_insn_type = INSIDE_IT_INSN;
15466 else
15467 inst.it_insn_type = OUTSIDE_IT_INSN;
15468
15469 now_it.state_handled = 0;
15470 }
15471
15472 /* IT state FSM handling function. */
15473
static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* Conditional ARM instruction: no IT block needed, but
		 warn in unified syntax unless ARM implicit-IT mode is
		 enabled.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional Thumb insn with no IT and no implicit-IT
		     support: hard error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT opens a manually-managed block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* IT blocks hold at most four instructions, all with
	     compatible conditions; otherwise close and maybe reopen.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and starts a
	     manual one.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* The suffix must match the condition slot the IT
		   instruction declared for this position.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
15631
15632 static void
15633 it_fsm_post_encode (void)
15634 {
15635 int is_last;
15636
15637 if (!now_it.state_handled)
15638 handle_it_state ();
15639
15640 is_last = (now_it.mask == 0x10);
15641 if (is_last)
15642 {
15643 now_it.state = OUTSIDE_IT_BLOCK;
15644 now_it.mask = 0;
15645 }
15646 }
15647
15648 static void
15649 force_automatic_it_block_close (void)
15650 {
15651 if (now_it.state == AUTOMATIC_IT_BLOCK)
15652 {
15653 close_automatic_it_block ();
15654 now_it.state = OUTSIDE_IT_BLOCK;
15655 now_it.mask = 0;
15656 }
15657 }
15658
15659 static int
15660 in_it_block (void)
15661 {
15662 if (!now_it.state_handled)
15663 handle_it_state ();
15664
15665 return now_it.state != OUTSIDE_IT_BLOCK;
15666 }
15667
/* GAS interface: assemble the single instruction in STR.  Looks the
   mnemonic up, checks it against the selected CPU's feature set, then
   dispatches to the Thumb or ARM encoder and finally emits the
   instruction via output_inst.  Errors are reported with as_bad and
   cause an early return with nothing emitted.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start each instruction from a clean per-instruction state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  /* Thumb encoding path.  */
  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicit require narrow instructions on Thumb-1.
		 This avoids relaxation accidentally introducing Thumb-2
		 instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
        {
          /* Prepare the it_insn_type for those encodings that don't set
             it.  */
          it_fsm_pre_encode ();

          opcode->tencode ();

          it_fsm_post_encode ();
        }

      if (!(inst.error || inst.relax))
	{
	  /* A 16-bit Thumb encoding can never start with the bit
	     pattern of a 32-bit instruction's first halfword.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      /* NOTE: statement-like macro, no parentheses or semicolon of its
	 own beyond what follows.  */
      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  /* ARM encoding path.  */
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
        {
          it_fsm_pre_encode ();
          opcode->aencode ();
          it_fsm_post_encode ();
        }
      /* Arm mode bx is marked as both v4T and v5 because it's still required
         on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  /* Write the encoded instruction (or relaxation frag) out.  */
  output_inst (str);
}
15852
/* Warn about any IT block opened with an explicit IT instruction
   (MANUAL_IT_BLOCK) that was still open when assembly finished.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* For ELF the IT state is kept per-section, so every section must be
     examined.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  /* Non-ELF targets only track a single global IT state.  */
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
15871
15872 /* Various frobbings of labels and their addresses. */
15873
/* Called at the start of each input line: forget any label seen on a
   previous line so md_assemble does not re-align it.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
15879
/* Called for every label definition.  Records the label for later
   alignment, tags it with the current ARM/Thumb mode, closes any
   automatically-opened IT block, optionally marks it as a Thumb
   function entry point, and emits DWARF line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label starts a new instruction stream, so any automatically
     generated IT block must end here.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* One-shot flag: it applies only to the label that follows the
	 .thumb_func directive.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
15938
15939 bfd_boolean
15940 arm_data_in_code (void)
15941 {
15942 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
15943 {
15944 *input_line_pointer = '/';
15945 input_line_pointer += 5;
15946 *input_line_pointer = 0;
15947 return TRUE;
15948 }
15949
15950 return FALSE;
15951 }
15952
15953 char *
15954 arm_canonicalize_symbol_name (char * name)
15955 {
15956 int len;
15957
15958 if (thumb_mode && (len = strlen (name)) > 5
15959 && streq (name + len - 5, "/data"))
15960 *(name + len - 5) = 0;
15961
15962 return name;
15963 }
15964 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF emits one reg_entry initializer: stringified name, number,
   REG_TYPE_<t>, builtin = TRUE, neon = 0.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM names the register p<n> with number n.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2 names the register p<n> with number 2*n (used below for the
   Neon Q register set).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET/REGSETH expand to 16 entries: registers 0-15 and 16-31
   respectively, all with prefix p and type t.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* REGSET2: registers 0-15 with doubled numbering (see REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
15988
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
/* NOTE(review): REGNUM2, REGSETH and REGSET2 are intentionally (?) left
   defined here — confirm no later clash before adding an #undef.  */
16078
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Each entry maps a suffix string to the
   OR of the PSR_* field-mask bits it selects.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.  */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},
  /* Combinations of flags.  Every ordering of each subset is listed so
     the lookup is a simple string match.  */
  /* Two-flag combinations.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  /* Three-flag combinations.  */
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  /* Four-flag combinations.  */
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
16156
/* Table of V7M psr names.  Each entry maps a special-register name
   (lower and upper case) to its numeric encoding; presumably the
   v7-M MRS/MSR SYSm value — TODO(review): confirm against the user.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 }, {"APSR", 0 },
  {"iapsr", 1 }, {"IAPSR", 1 },
  {"eapsr", 2 }, {"EAPSR", 2 },
  {"psr", 3 }, {"PSR", 3 },
  {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
  {"ipsr", 5 }, {"IPSR", 5 },
  {"epsr", 6 }, {"EPSR", 6 },
  {"iepsr", 7 }, {"IEPSR", 7 },
  {"msp", 8 }, {"MSP", 8 },
  {"psp", 9 }, {"PSP", 9 },
  {"primask", 16}, {"PRIMASK", 16},
  {"basepri", 17}, {"BASEPRI", 17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  {"faultmask", 19}, {"FAULTMASK", 19},
  {"control", 20}, {"CONTROL", 20}
};
16176
/* Table of all shift-in-operand names.  Note that "asl" is accepted as
   a synonym: it maps to the same SHIFT_LSL code as "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
16187
/* Table of all explicit relocation names (as written in source with
   the "name:" / "(name)" operand syntax), mapping each to its BFD
   relocation code.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL}
};
#endif
16206
/* Table of all conditional affixes.  0xF is not defined as a condition
   code.  Several mnemonics share a value: cs/hs are both 0x2 and
   cc/ul/lo are all 0x3.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
16226
/* Table of barrier option names and their 4-bit option-field values
   (presumably for the DMB/DSB/ISB encodings — TODO(review): confirm
   against the barrier encoder).  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy", 0xf },
  { "un", 0x7 },
  { "st", 0xe },
  { "unst", 0x6 }
};
16234
16235 /* Table of ARM-format instructions. */
16236
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0() { OP_stop, }
#define OPS1(a) { OP_##a, }
#define OPS2(a,b) { OP_##a,OP_##b, }
#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a) { a, }
#define OPS_2(a,b) { a,b, }
#define OPS_3(a,b,c) { a,b,c, }
#define OPS_4(a,b,c,d) { a,b,c,d, }
#define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but flagged deprecated (triggers the "s suffix on comparison
   instruction" warning in md_assemble).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand one mnemonic into its bare form plus all condition-infixed
   variants.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only analogue of TxCM_: odd-position conditional infix.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1, , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets an encoder argument of 0 expand through do_##ae to the constant
   0, i.e. a NULL encoder pointer.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE (mnem, 0, op, nops, ops, 0, te)
16439
16440 static const struct asm_opcode insns[] =
16441 {
16442 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16443 #define THUMB_VARIANT &arm_ext_v4t
16444 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
16445 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
16446 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
16447 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
16448 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
16449 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
16450 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
16451 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
16452 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
16453 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
16454 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
16455 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
16456 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
16457 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
16458 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
16459 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
16460
16461 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16462 for setting PSR flag bits. They are obsolete in V6 and do not
16463 have Thumb equivalents. */
16464 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16465 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16466 CL("tstp", 110f000, 2, (RR, SH), cmp),
16467 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16468 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16469 CL("cmpp", 150f000, 2, (RR, SH), cmp),
16470 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16471 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16472 CL("cmnp", 170f000, 2, (RR, SH), cmp),
16473
16474 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
16475 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
16476 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
16477 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
16478
16479 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
16480 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
16481 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
16482 OP_RRnpc),
16483 OP_ADDRGLDR),ldst, t_ldst),
16484 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
16485
16486 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16487 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16488 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16489 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16490 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16491 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16492
16493 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
16494 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
16495 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
16496 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
16497
16498 /* Pseudo ops. */
16499 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
16500 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
16501 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
16502
16503 /* Thumb-compatibility pseudo ops. */
16504 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
16505 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
16506 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
16507 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
16508 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
16509 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
16510 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
16511 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
16512 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
16513 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
16514 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
16515 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
16516
16517 /* These may simplify to neg. */
16518 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
16519 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
16520
16521 #undef THUMB_VARIANT
16522 #define THUMB_VARIANT & arm_ext_v6
16523
16524 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
16525
16526 /* V1 instructions with no Thumb analogue prior to V6T2. */
16527 #undef THUMB_VARIANT
16528 #define THUMB_VARIANT & arm_ext_v6t2
16529
16530 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16531 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16532 CL("teqp", 130f000, 2, (RR, SH), cmp),
16533
16534 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16535 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16536 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
16537 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
16538
16539 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16540 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16541
16542 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16543 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16544
16545 /* V1 instructions with no Thumb analogue at all. */
16546 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
16547 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
16548
16549 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
16550 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
16551 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
16552 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
16553 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
16554 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
16555 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
16556 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
16557
16558 #undef ARM_VARIANT
16559 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16560 #undef THUMB_VARIANT
16561 #define THUMB_VARIANT & arm_ext_v4t
16562
16563 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16564 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16565
16566 #undef THUMB_VARIANT
16567 #define THUMB_VARIANT & arm_ext_v6t2
16568
16569 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16570 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
16571
16572 /* Generic coprocessor instructions. */
16573 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16574 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16575 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16576 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16577 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16578 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16579 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16580
16581 #undef ARM_VARIANT
16582 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16583
16584 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16585 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16586
16587 #undef ARM_VARIANT
16588 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16589 #undef THUMB_VARIANT
16590 #define THUMB_VARIANT & arm_ext_msr
16591
16592 TCE("mrs", 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
16593 TCE("msr", 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
16594
16595 #undef ARM_VARIANT
16596 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16597 #undef THUMB_VARIANT
16598 #define THUMB_VARIANT & arm_ext_v6t2
16599
16600 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16601 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16602 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16603 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16604 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16605 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16606 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16607 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16608
16609 #undef ARM_VARIANT
16610 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16611 #undef THUMB_VARIANT
16612 #define THUMB_VARIANT & arm_ext_v4t
16613
16614 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16615 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16616 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16617 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16618 tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16619 tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
16620
16621 #undef ARM_VARIANT
16622 #define ARM_VARIANT & arm_ext_v4t_5
16623
16624 /* ARM Architecture 4T. */
16625 /* Note: bx (and blx) are required on V5, even if the processor does
16626 not support Thumb. */
16627 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
16628
16629 #undef ARM_VARIANT
16630 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16631 #undef THUMB_VARIANT
16632 #define THUMB_VARIANT & arm_ext_v5t
16633
16634 /* Note: blx has 2 variants; the .value coded here is for
16635 BLX(2). Only this variant has conditional execution. */
16636 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
16637 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
16638
16639 #undef THUMB_VARIANT
16640 #define THUMB_VARIANT & arm_ext_v6t2
16641
16642 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
16643 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16644 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16645 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16646 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16647 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16648 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16649 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16650
16651 #undef ARM_VARIANT
16652 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16653 #undef THUMB_VARIANT
16654 #define THUMB_VARIANT &arm_ext_v5exp
16655
16656 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16657 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16658 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16659 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16660
16661 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16662 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16663
16664 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16665 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16666 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16667 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16668
16669 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16670 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16671 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16672 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16673
16674 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16675 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16676
16677 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16678 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16679 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16680 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16681
16682 #undef ARM_VARIANT
16683 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16684 #undef THUMB_VARIANT
16685 #define THUMB_VARIANT &arm_ext_v6t2
16686
16687 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
16688 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
16689 ldrd, t_ldstd),
16690 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
16691 ADDRGLDRS), ldrd, t_ldstd),
16692
16693 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16694 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16695
16696 #undef ARM_VARIANT
16697 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16698
16699 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
16700
16701 #undef ARM_VARIANT
16702 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16703 #undef THUMB_VARIANT
16704 #define THUMB_VARIANT & arm_ext_v6
16705
16706 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
16707 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
16708 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16709 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16710 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16711 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16712 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16713 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16714 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16715 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
16716
16717 #undef THUMB_VARIANT
16718 #define THUMB_VARIANT & arm_ext_v6t2
16719
16720 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
16721 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
16722 strex, t_strex),
16723 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16724 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16725
16726 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
16727 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
16728
16729 /* ARM V6 not included in V7M. */
16730 #undef THUMB_VARIANT
16731 #define THUMB_VARIANT & arm_ext_v6_notm
16732 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16733 UF(rfeib, 9900a00, 1, (RRw), rfe),
16734 UF(rfeda, 8100a00, 1, (RRw), rfe),
16735 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16736 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16737 UF(rfefa, 9900a00, 1, (RRw), rfe),
16738 UF(rfeea, 8100a00, 1, (RRw), rfe),
16739 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16740 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
16741 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
16742 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
16743 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
16744
16745 /* ARM V6 not included in V7M (e.g. integer SIMD). */
16746 #undef THUMB_VARIANT
16747 #define THUMB_VARIANT & arm_ext_v6_dsp
16748 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
16749 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
16750 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
16751 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16752 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16753 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16754 /* Old name for QASX. */
16755 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16756 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16757 /* Old name for QSAX. */
16758 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16759 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16760 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16761 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16762 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16763 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16764 /* Old name for SASX. */
16765 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16766 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16767 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16768 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16769 /* Old name for SHASX. */
16770 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16771 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16772 /* Old name for SHSAX. */
16773 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16774 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16775 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16776 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16777 /* Old name for SSAX. */
16778 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16779 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16780 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16781 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16782 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16783 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16784 /* Old name for UASX. */
16785 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16786 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16787 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16788 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16789 /* Old name for UHASX. */
16790 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16791 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16792 /* Old name for UHSAX. */
16793 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16794 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16795 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16796 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16797 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16798 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16799 /* Old name for UQASX. */
16800 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16801 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16802 /* Old name for UQSAX. */
16803 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16804 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16805 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16806 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16807 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16808 /* Old name for USAX. */
16809 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16810 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16811 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16812 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16813 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16814 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16815 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16816 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16817 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16818 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16819 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16820 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16821 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16822 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16823 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16824 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16825 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16826 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16827 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16828 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16829 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16830 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16831 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16832 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16833 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16834 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16835 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16836 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16837 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16838 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
16839 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
16840 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16841 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16842 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
16843
16844 #undef ARM_VARIANT
16845 #define ARM_VARIANT & arm_ext_v6k
16846 #undef THUMB_VARIANT
16847 #define THUMB_VARIANT & arm_ext_v6k
16848
16849 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
16850 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
16851 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
16852 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
16853
16854 #undef THUMB_VARIANT
16855 #define THUMB_VARIANT & arm_ext_v6_notm
16856 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
16857 ldrexd, t_ldrexd),
16858 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
16859 RRnpcb), strexd, t_strexd),
16860
16861 #undef THUMB_VARIANT
16862 #define THUMB_VARIANT & arm_ext_v6t2
16863 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
16864 rd_rn, rd_rn),
16865 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
16866 rd_rn, rd_rn),
16867 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
16868 strex, rm_rd_rn),
16869 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
16870 strex, rm_rd_rn),
16871 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
16872
16873 #undef ARM_VARIANT
16874 #define ARM_VARIANT & arm_ext_v6z
16875
16876 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
16877
16878 #undef ARM_VARIANT
16879 #define ARM_VARIANT & arm_ext_v6t2
16880
16881 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
16882 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
16883 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16884 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16885
16886 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16887 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
16888 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
16889 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
16890
16891 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
16892 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
16893 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
16894 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
16895
16896 UT("cbnz", b900, 2, (RR, EXP), t_cbz),
16897 UT("cbz", b100, 2, (RR, EXP), t_cbz),
16898
16899 /* ARM does not really have an IT instruction, so always allow it.
16900 The opcode is copied from Thumb in order to allow warnings in
16901 -mimplicit-it=[never | arm] modes. */
16902 #undef ARM_VARIANT
16903 #define ARM_VARIANT & arm_ext_v1
16904
16905 TUE("it", bf08, bf08, 1, (COND), it, t_it),
16906 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
16907 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
16908 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
16909 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
16910 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
16911 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
16912 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
16913 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
16914 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
16915 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
16916 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
16917 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
16918 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
16919 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
16920 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
16921 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
16922 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
16923
16924 /* Thumb2 only instructions. */
16925 #undef ARM_VARIANT
16926 #define ARM_VARIANT NULL
16927
16928 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16929 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16930 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
16931 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
16932 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
16933 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
16934
16935 /* Thumb-2 hardware division instructions (R and M profiles only). */
16936 #undef THUMB_VARIANT
16937 #define THUMB_VARIANT & arm_ext_div
16938
16939 TCE("sdiv", 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
16940 TCE("udiv", 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
16941
16942 /* ARM V6M/V7 instructions. */
16943 #undef ARM_VARIANT
16944 #define ARM_VARIANT & arm_ext_barrier
16945 #undef THUMB_VARIANT
16946 #define THUMB_VARIANT & arm_ext_barrier
16947
16948 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
16949 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
16950 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
16951
16952 /* ARM V7 instructions. */
16953 #undef ARM_VARIANT
16954 #define ARM_VARIANT & arm_ext_v7
16955 #undef THUMB_VARIANT
16956 #define THUMB_VARIANT & arm_ext_v7
16957
16958 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
16959 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
16960
16961 #undef ARM_VARIANT
16962 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
16963
16964 cCE("wfs", e200110, 1, (RR), rd),
16965 cCE("rfs", e300110, 1, (RR), rd),
16966 cCE("wfc", e400110, 1, (RR), rd),
16967 cCE("rfc", e500110, 1, (RR), rd),
16968
16969 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
16970 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
16971 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
16972 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
16973
16974 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
16975 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
16976 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
16977 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
16978
16979 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
16980 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
16981 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
16982 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
16983 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
16984 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
16985 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
16986 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
16987 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
16988 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
16989 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
16990 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
16991
16992 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
16993 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
16994 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
16995 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
16996 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
16997 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
16998 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
16999 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
17000 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
17001 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
17002 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
17003 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
17004
17005 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
17006 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
17007 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
17008 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
17009 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
17010 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
17011 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
17012 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
17013 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
17014 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
17015 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
17016 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
17017
17018 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
17019 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
17020 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
17021 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
17022 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
17023 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
17024 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
17025 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
17026 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
17027 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
17028 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
17029 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
17030
17031 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
17032 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
17033 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
17034 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
17035 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
17036 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
17037 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
17038 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
17039 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
17040 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
17041 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
17042 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
17043
17044 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
17045 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
17046 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
17047 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
17048 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
17049 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
17050 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
17051 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
17052 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
17053 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
17054 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
17055 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
17056
17057 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
17058 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
17059 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
17060 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
17061 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
17062 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
17063 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
17064 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
17065 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
17066 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
17067 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
17068 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
17069
17070 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
17071 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
17072 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
17073 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
17074 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
17075 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
17076 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
17077 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
17078 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
17079 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
17080 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
/* Was "expdz" (a duplicate of the entry above with opcode e7081e0);
   opcode e788160 is the extended-precision, round-towards-zero
   variant, so the mnemonic must be "expez" to match every other
   FPA monadic family (s/sp/sm/sz, d/dp/dm/dz, e/ep/em/ez). */
17081 cCL("expez", e788160, 2, (RF, RF_IF), rd_rm),
17082
17083 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
17084 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
17085 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
17086 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
17087 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
17088 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
17089 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
17090 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
17091 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
17092 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
17093 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
17094 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
17095
17096 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
17097 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
17098 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
17099 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
17100 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
17101 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
17102 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
17103 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
17104 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
17105 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
17106 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
17107 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
17108
17109 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
17110 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
17111 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
17112 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
17113 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
17114 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
17115 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
17116 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
17117 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
17118 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
17119 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
17120 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
17121
17122 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
17123 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
17124 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
17125 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
17126 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
17127 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
17128 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
17129 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
17130 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
17131 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
17132 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
17133 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
17134
17135 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
17136 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
17137 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
17138 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
17139 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
17140 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
17141 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
17142 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
17143 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
17144 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
17145 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
17146 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
17147
17148 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
17149 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
17150 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
17151 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
17152 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
17153 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
17154 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
17155 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
17156 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
17157 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
17158 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
17159 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
17160
17161 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
17162 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
17163 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
17164 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
17165 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
17166 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
17167 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
17168 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
17169 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
17170 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
17171 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
17172 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
17173
17174 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
17175 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
17176 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
17177 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
17178 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
17179 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
17180 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
17181 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
17182 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
17183 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
17184 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
17185 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
17186
17187 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17188 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17189 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17190 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17191 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17192 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17193 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17194 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17195 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17196 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17197 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17198 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17199
17200 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17201 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17202 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17203 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17204 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17205 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17206 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17207 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17208 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17209 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17210 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17211 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17212
17213 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17214 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17215 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17216 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17217 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17218 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17219 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17220 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17221 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17222 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17223 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17224 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17225
17226 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17227 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17228 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17229 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17230 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17231 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17232 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17233 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17234 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17235 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17236 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17237 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17238
17239 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17240 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17241 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17242 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17243 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17244 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17245 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17246 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17247 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17248 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17249 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17250 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17251
17252 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17253 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17254 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17255 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17256 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17257 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17258 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17259 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17260 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17261 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17262 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17263 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17264
17265 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17266 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17267 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17268 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17269 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17270 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17271 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17272 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17273 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17274 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17275 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17276 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17277
17278 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17279 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17280 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17281 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17282 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17283 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17284 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17285 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17286 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17287 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17288 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
17289 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
17290
17291 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
17292 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
17293 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
17294 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
17295 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
17296 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17297 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17298 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17299 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
17300 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
17301 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
17302 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
17303
17304 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
17305 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17306 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17307 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17308 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17309 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17310 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17311 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17312 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
17313 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
17314 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
17315 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
17316
17317 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17318 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17319 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17320 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17321 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17322 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17323 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17324 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17325 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17326 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17327 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17328 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17329
17330 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17331 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17332 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17333 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17334 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17335 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17336 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17337 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17338 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17339 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17340 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17341 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17342
17343 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17344 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17345 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17346 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17347 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17348 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17349 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17350 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17351 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17352 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17353 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17354 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17355
17356 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
17357 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
17358 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
17359 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
17360
17361 cCL("flts", e000110, 2, (RF, RR), rn_rd),
17362 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
17363 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
17364 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
17365 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
17366 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
17367 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
17368 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
17369 cCL("flte", e080110, 2, (RF, RR), rn_rd),
17370 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
17371 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
17372 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
17373
17374 /* The implementation of the FIX instruction is broken on some
17375 assemblers, in that it accepts a precision specifier as well as a
17376 rounding specifier, despite the fact that this is meaningless.
17377 To be more compatible, we accept it as well, though of course it
17378 does not set any bits. */
17379 cCE("fix", e100110, 2, (RR, RF), rd_rm),
17380 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
17381 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
17382 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
17383 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
17384 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
17385 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
17386 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
17387 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
17388 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
17389 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
17390 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
17391 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
17392
17393 /* Instructions that were new with the real FPA, call them V2. */
17394 #undef ARM_VARIANT
17395 #define ARM_VARIANT & fpu_fpa_ext_v2
17396
17397 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17398 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17399 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17400 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17401 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17402 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17403
17404 #undef ARM_VARIANT
17405 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17406
17407 /* Moves and type conversions. */
17408 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
17409 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
17410 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
17411 cCE("fmstat", ef1fa10, 0, (), noargs),
17412 cCE("vmrs", ef10a10, 2, (APSR_RR, RVC), vmrs),
17413 cCE("vmsr", ee10a10, 2, (RVC, RR), vmsr),
17414 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
17415 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
17416 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
17417 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17418 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
17419 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17420 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
17421 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
17422
17423 /* Memory operations. */
17424 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17425 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17426 cCE("fldmias", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17427 cCE("fldmfds", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17428 cCE("fldmdbs", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17429 cCE("fldmeas", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17430 cCE("fldmiax", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17431 cCE("fldmfdx", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17432 cCE("fldmdbx", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17433 cCE("fldmeax", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17434 cCE("fstmias", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17435 cCE("fstmeas", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17436 cCE("fstmdbs", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17437 cCE("fstmfds", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17438 cCE("fstmiax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17439 cCE("fstmeax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17440 cCE("fstmdbx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17441 cCE("fstmfdx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17442
17443 /* Monadic operations. */
17444 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
17445 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
17446 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
17447
17448 /* Dyadic operations. */
17449 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17450 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17451 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17452 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17453 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17454 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17455 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17456 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17457 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17458
17459 /* Comparisons. */
17460 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
17461 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
17462 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
17463 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
17464
17465 /* Double precision load/store are still present on single precision
17466 implementations. */
17467 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17468 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17469 cCE("fldmiad", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17470 cCE("fldmfdd", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17471 cCE("fldmdbd", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17472 cCE("fldmead", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17473 cCE("fstmiad", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17474 cCE("fstmead", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17475 cCE("fstmdbd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17476 cCE("fstmfdd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17477
17478 #undef ARM_VARIANT
17479 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17480
17481 /* Moves and type conversions. */
17482 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17483 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17484 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17485 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
17486 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
17487 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
17488 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
17489 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17490 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
17491 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17492 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17493 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17494 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17495
17496 /* Monadic operations. */
17497 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17498 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17499 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17500
17501 /* Dyadic operations. */
17502 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17503 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17504 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17505 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17506 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17507 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17508 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17509 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17510 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17511
17512 /* Comparisons. */
17513 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17514 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
17515 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17516 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
17517
17518 #undef ARM_VARIANT
17519 #define ARM_VARIANT & fpu_vfp_ext_v2
17520
17521 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
17522 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
17523 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
17524 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
17525
17526 /* Instructions which may belong to either the Neon or VFP instruction sets.
17527 Individual encoder functions perform additional architecture checks. */
17528 #undef ARM_VARIANT
17529 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17530 #undef THUMB_VARIANT
17531 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17532
17533 /* These mnemonics are unique to VFP. */
17534 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
17535 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
17536 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17537 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17538 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17539 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17540 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17541 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
17542 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
17543 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
17544
17545 /* Mnemonics shared by Neon and VFP. */
17546 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
17547 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17548 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17549
17550 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17551 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17552
17553 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17554 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17555
17556 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17557 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17558 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17559 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17560 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17561 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17562 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17563 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17564
17565 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
17566 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
17567 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
17568 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
17569
17570
17571 /* NOTE: All VMOV encoding is special-cased! */
17572 NCE(vmov, 0, 1, (VMOV), neon_mov),
17573 NCE(vmovq, 0, 1, (VMOV), neon_mov),
17574
17575 #undef THUMB_VARIANT
17576 #define THUMB_VARIANT & fpu_neon_ext_v1
17577 #undef ARM_VARIANT
17578 #define ARM_VARIANT & fpu_neon_ext_v1
17579
17580 /* Data processing with three registers of the same length. */
17581 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17582 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
17583 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
17584 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17585 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17586 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17587 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17588 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17589 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17590 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17591 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17592 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17593 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17594 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17595 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17596 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17597 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17598 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17599 /* If not immediate, fall back to neon_dyadic_i64_su.
17600 shl_imm should accept I8 I16 I32 I64,
17601 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17602 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
17603 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
17604 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
17605 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
17606 /* Logic ops, types optional & ignored. */
17607 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17608 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17609 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17610 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17611 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17612 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17613 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
17614 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
17615 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
17616 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
17617 /* Bitfield ops, untyped. */
17618 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17619 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17620 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17621 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17622 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17623 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17624 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17625 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17626 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17627 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17628 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17629 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17630 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17631 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17632 back to neon_dyadic_if_su. */
17633 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17634 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17635 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17636 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17637 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17638 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17639 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17640 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17641 /* Comparison. Type I8 I16 I32 F32. */
17642 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
17643 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
17644 /* As above, D registers only. */
17645 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17646 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17647 /* Int and float variants, signedness unimportant. */
17648 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17649 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17650 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
17651 /* Add/sub take types I8 I16 I32 I64 F32. */
17652 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17653 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17654 /* vtst takes sizes 8, 16, 32. */
17655 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
17656 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
17657 /* VMUL takes I8 I16 I32 F32 P8. */
17658 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
17659 /* VQD{R}MULH takes S16 S32. */
17660 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17661 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17662 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17663 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17664 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17665 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17666 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17667 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17668 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17669 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17670 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17671 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17672 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17673 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17674 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17675 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17676
17677 /* Two address, int/float. Types S8 S16 S32 F32. */
17678 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
17679 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
17680
17681 /* Data processing with two registers and a shift amount. */
17682 /* Right shifts, and variants with rounding.
17683 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17684 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17685 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17686 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17687 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17688 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17689 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17690 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17691 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17692 /* Shift and insert. Sizes accepted 8 16 32 64. */
17693 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
17694 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
17695 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
17696 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
17697 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17698 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
17699 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
17700 /* Right shift immediate, saturating & narrowing, with rounding variants.
17701 Types accepted S16 S32 S64 U16 U32 U64. */
17702 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17703 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17704 /* As above, unsigned. Types accepted S16 S32 S64. */
17705 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17706 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17707 /* Right shift narrowing. Types accepted I16 I32 I64. */
17708 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17709 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17710 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17711 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
17712 /* CVT with optional immediate for fixed-point variant. */
17713 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
17714
17715 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
17716 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
17717
17718 /* Data processing, three registers of different lengths. */
17719 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17720 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
17721 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
17722 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
17723 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
17724 /* If not scalar, fall back to neon_dyadic_long.
17725 Vector types as above, scalar types S16 S32 U16 U32. */
17726 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17727 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17728 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17729 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17730 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17731 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17732 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17733 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17734 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17735 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17736 /* Saturating doubling multiplies. Types S16 S32. */
17737 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17738 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17739 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17740 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17741 S16 S32 U16 U32. */
17742 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
17743
17744 /* Extract. Size 8. */
17745 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
17746 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
17747
17748 /* Two registers, miscellaneous. */
17749 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17750 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
17751 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
17752 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
17753 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
17754 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
17755 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
17756 /* Vector replicate. Sizes 8 16 32. */
17757 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
17758 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
17759 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17760 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
17761 /* VMOVN. Types I16 I32 I64. */
17762 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
17763 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17764 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
17765 /* VQMOVUN. Types S16 S32 S64. */
17766 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
17767 /* VZIP / VUZP. Sizes 8 16 32. */
17768 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
17769 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
17770 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
17771 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
17772 /* VQABS / VQNEG. Types S8 S16 S32. */
17773 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17774 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
17775 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17776 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
17777 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17778 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
17779 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
17780 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
17781 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
17782 /* Reciprocal estimates. Types U32 F32. */
17783 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
17784 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
17785 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
17786 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
17787 /* VCLS. Types S8 S16 S32. */
17788 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
17789 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
17790 /* VCLZ. Types I8 I16 I32. */
17791 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
17792 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
17793 /* VCNT. Size 8. */
17794 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
17795 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
17796 /* Two address, untyped. */
17797 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
17798 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
17799 /* VTRN. Sizes 8 16 32. */
17800 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
17801 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
17802
17803 /* Table lookup. Size 8. */
17804 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17805 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17806
17807 #undef THUMB_VARIANT
17808 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
17809 #undef ARM_VARIANT
17810 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
17811
17812 /* Neon element/structure load/store. */
17813 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17814 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17815 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17816 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17817 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17818 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17819 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17820 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17821
17822 #undef THUMB_VARIANT
17823 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
17824 #undef ARM_VARIANT
17825 #define ARM_VARIANT &fpu_vfp_ext_v3xd
17826 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
17827 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17828 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17829 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17830 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17831 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17832 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17833 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17834 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17835
17836 #undef THUMB_VARIANT
17837 #define THUMB_VARIANT & fpu_vfp_ext_v3
17838 #undef ARM_VARIANT
17839 #define ARM_VARIANT & fpu_vfp_ext_v3
17840
17841 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
17842 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17843 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17844 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17845 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17846 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17847 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17848 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17849 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17850
17851 #undef ARM_VARIANT
17852 #define ARM_VARIANT &fpu_vfp_ext_fma
17853 #undef THUMB_VARIANT
17854 #define THUMB_VARIANT &fpu_vfp_ext_fma
17855 /* Mnemonics shared by Neon and VFP. These are included in the
17856 VFP FMA variant; NEON and VFP FMA always includes the NEON
17857 FMA instructions. */
17858 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17859 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17860 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
17861 the v form should always be used. */
17862 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17863 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17864 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17865 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17866 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17867 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17868
17869 #undef THUMB_VARIANT
17870 #undef ARM_VARIANT
17871 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
17872
17873 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17874 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17875 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17876 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17877 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17878 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17879 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
17880 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
17881
17882 #undef ARM_VARIANT
17883 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
17884
17885 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
17886 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
17887 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
17888 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
17889 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
17890 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
17891 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
17892 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
17893 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
17894 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17895 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17896 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17897 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17898 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17899 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17900 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17901 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17902 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17903 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
17904 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
17905 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17906 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17907 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17908 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17909 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17910 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17911 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
17912 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
17913 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
17914 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
17915 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
17916 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
17917 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
17918 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
17919 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
17920 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
17921 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
17922 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17923 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17924 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17925 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17926 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17927 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17928 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17929 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17930 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17931 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
17932 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17933 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17934 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17935 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17936 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17937 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17938 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17939 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17940 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17941 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17942 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17943 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17944 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17945 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17946 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17947 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17948 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17949 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17950 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17951 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17952 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17953 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17954 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17955 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17956 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17957 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17958 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17959 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17960 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17961 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17962 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17963 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17964 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17965 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17966 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17967 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17968 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17969 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17970 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17971 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17972 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17973 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
17974 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17975 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17976 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17977 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17978 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17979 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17980 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17981 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17982 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17983 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17984 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17985 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17986 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17987 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17988 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17989 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17990 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17991 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17992 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17993 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17994 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17995 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
17996 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17997 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17998 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17999 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18000 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18001 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18002 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18003 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18004 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18005 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18006 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18007 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18008 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18009 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18010 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18011 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18012 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18013 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18014 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18015 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18016 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18017 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18018 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18019 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18020 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18021 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18022 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18023 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18024 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18025 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18026 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18027 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
18028 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
18029 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
18030 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
18031 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
18032 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
18033 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18034 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18035 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18036 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
18037 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
18038 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
18039 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
18040 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
18041 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
18042 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18043 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18044 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18045 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18046 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
18047
18048 #undef ARM_VARIANT
18049 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18050
18051 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
18052 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
18053 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
18054 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
18055 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
18056 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
18057 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18058 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18059 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18060 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18061 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18062 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18063 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18064 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18065 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18066 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18067 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18068 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18069 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18070 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18071 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18072 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18073 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18074 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18075 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18076 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18077 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18078 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18079 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18080 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18081 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18082 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18083 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18084 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18085 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18086 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18087 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18088 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18089 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18090 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18091 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18092 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18093 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18094 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18095 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18096 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18097 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18098 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18099 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18100 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18101 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18102 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18103 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18104 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18105 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18106 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18107 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18108
18109 #undef ARM_VARIANT
18110 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18111
18112 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18113 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18114 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18115 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18116 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18117 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18118 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18119 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18120 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
18121 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
18122 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
18123 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
18124 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
18125 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
18126 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
18127 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
18128 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
18129 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
18130 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
18131 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
18132 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
18133 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
18134 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
18135 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
18136 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
18137 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
18138 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
18139 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
18140 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
18141 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
18142 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
18143 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
18144 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
18145 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
18146 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
18147 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
18148 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
18149 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
18150 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
18151 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
18152 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
18153 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
18154 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
18155 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
18156 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
18157 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
18158 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
18159 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
18160 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
18161 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
18162 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
18163 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
18164 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
18165 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
18166 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
18167 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
18168 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
18169 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
18170 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
18171 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
18172 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
18173 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
18174 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
18175 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
18176 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18177 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18178 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18179 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18180 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18181 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18182 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18183 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18184 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18185 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18186 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18187 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18188 };
18189 #undef ARM_VARIANT
18190 #undef THUMB_VARIANT
18191 #undef TCE
18192 #undef TCM
18193 #undef TUE
18194 #undef TUF
18195 #undef TCC
18196 #undef cCE
18197 #undef cCL
18198 #undef C3E
18199 #undef CE
18200 #undef CM
18201 #undef UE
18202 #undef UF
18203 #undef UT
18204 #undef NUF
18205 #undef nUF
18206 #undef NCE
18207 #undef nCE
18208 #undef OPS0
18209 #undef OPS1
18210 #undef OPS2
18211 #undef OPS3
18212 #undef OPS4
18213 #undef OPS5
18214 #undef OPS6
18215 #undef do_0
18216 \f
18217 /* MD interface: bits in the object file. */
18218
18219 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18220 for use in the a.out file, and stores them in the array pointed to by buf.
18221 This knows about the endian-ness of the target machine and does
18222 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18223    2 (short) and 4 (long).  Floating numbers are put out as a series of
18224 LITTLENUMS (shorts, here at least). */
18225
18226 void
18227 md_number_to_chars (char * buf, valueT val, int n)
18228 {
18229 if (target_big_endian)
18230 number_to_chars_bigendian (buf, val, n);
18231 else
18232 number_to_chars_littleendian (buf, val, n);
18233 }
18234
18235 static valueT
18236 md_chars_to_number (char * buf, int n)
18237 {
18238 valueT result = 0;
18239 unsigned char * where = (unsigned char *) buf;
18240
18241 if (target_big_endian)
18242 {
18243 while (n--)
18244 {
18245 result <<= 8;
18246 result |= (*where++ & 255);
18247 }
18248 }
18249 else
18250 {
18251 while (n--)
18252 {
18253 result <<= 8;
18254 result |= (where[n] & 255);
18255 }
18256 }
18257
18258 return result;
18259 }
18260
18261 /* MD interface: Sections. */
18262
18263 /* Estimate the size of a frag before relaxing. Assume everything fits in
18264 2 bytes. */
18265
18266 int
18267 md_estimate_size_before_relax (fragS * fragp,
18268 segT segtype ATTRIBUTE_UNUSED)
18269 {
18270 fragp->fr_var = 2;
18271 return 2;
18272 }
18273
/* Convert a machine dependent frag.  Called once relaxation has settled
   on a size: FRAGP->fr_var is 2 for the narrow (16-bit) Thumb encoding
   that is already in place, or 4 for the wide (32-bit) encoding, which
   is synthesized here by moving the register fields of the old narrow
   instruction into the Thumb-2 equivalent.  A fixup is then attached to
   fill in the immediate/offset field at write time.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* The 16-bit instruction previously emitted for this frag; its
     register fields are recycled into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow forms with top nibble 4 (ldr literal) or 9
	     (sp-relative) keep the transfer register in bits 8-10;
	     move it to bits 12-14 of the wide encoding.  Other forms
	     have Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): bits 10-11 presumably select the
	     positive-offset immediate addressing form expected by the
	     T32_OFFSET_IMM fixup — confirm against the T32 load/store
	     encodings.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the pseudo "ldr rd, =expr" form is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* NOTE(review): compensates for the Thumb pipeline offset in
	     the narrow-add reloc convention — confirm.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs keep the register in the Rd field (bits 8-11 of
	     the wide form, no shift); cmp/cmn move it up to the Rn
	     field (shift by 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code (bits 8-11) into bits 22-25 of the
	     wide conditional-branch encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* NOTE(review): bit 20 looks like the S (flag-setting) bit
	     of the T32 data-processing encoding, steering the reloc
	     choice — confirm.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
18442
18443 /* Return the size of a relaxable immediate operand instruction.
18444 SHIFT and SIZE specify the form of the allowable immediate. */
18445 static int
18446 relax_immediate (fragS *fragp, int size, int shift)
18447 {
18448 offsetT offset;
18449 offsetT mask;
18450 offsetT low;
18451
18452 /* ??? Should be able to do better than this. */
18453 if (fragp->fr_symbol)
18454 return 4;
18455
18456 low = (1 << shift) - 1;
18457 mask = (1 << (shift + size)) - (1 << shift);
18458 offset = fragp->fr_offset;
18459 /* Force misaligned offsets to 32-bit variant. */
18460 if (offset & low)
18461 return 4;
18462 if (offset & ~mask)
18463 return 4;
18464 return 2;
18465 }
18466
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   being relaxed and STRETCH the cumulative size change so far on this
   pass; the result compensates for frags the relaxer has not yet
   revisited.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch toward zero to the alignment
		 boundary (fr_offset is the alignment power).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the stretch if the symbol's frag lies after us.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
18516
/* Return the size (2 or 4 bytes) of a relaxable adr pseudo-instruction
   or PC-relative load.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The Thumb PC reads as the instruction address + 4, rounded down to
     a word boundary for adr / pc-relative loads.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form encodes an unsigned 8-bit word offset: 0..1020.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
18542
18543 /* Return the size of a relaxable add/sub immediate instruction. */
18544 static int
18545 relax_addsub (fragS *fragp, asection *sec)
18546 {
18547 char *buf;
18548 int op;
18549
18550 buf = fragp->fr_literal + fragp->fr_fix;
18551 op = bfd_get_16(sec->owner, buf);
18552 if ((op & 0xf) == ((op >> 4) & 0xf))
18553 return relax_immediate (fragp, 8, 0);
18554 else
18555 return relax_immediate (fragp, 3, 0);
18556 }
18557
18558
18559 /* Return the size of a relaxable branch instruction. BITS is the
18560 size of the offset field in the narrow instruction. */
18561
18562 static int
18563 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
18564 {
18565 addressT addr;
18566 offsetT val;
18567 offsetT limit;
18568
18569 /* Assume worst case for symbols not known to be in the same section. */
18570 if (!S_IS_DEFINED (fragp->fr_symbol)
18571 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18572 return 4;
18573
18574 #ifdef OBJ_ELF
18575 if (S_IS_DEFINED (fragp->fr_symbol)
18576 && ARM_IS_FUNC (fragp->fr_symbol))
18577 return 4;
18578 #endif
18579
18580 val = relaxed_symbol_addr (fragp, stretch);
18581 addr = fragp->fr_address + fragp->fr_fix + 4;
18582 val -= addr;
18583
18584 /* Offset is a signed value *2 */
18585 limit = 1 << bits;
18586 if (val >= limit || val < -limit)
18587 return 4;
18588 return 2;
18589 }
18590
18591
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   original mnemonic (fr_subtype) to the helper that knows the shape of
   its narrow immediate field.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      /* The "ldr rd, =expr" pseudo: PC-relative like adr.  */
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate scaled by 4 (word offset).  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit byte offset, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit halfword offset.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit halfword offset.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* add/sub sp, sp, #imm: 7-bit immediate scaled by 4.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
18670
/* Round up a section size to the appropriate boundary.  For most
   output formats the size is returned unchanged; only a.out needs the
   explicit rounding below.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
18694
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero bytes up to the noop alignment,
   then ARM or Thumb no-op instructions as appropriate for the mode
   recorded in the frag.  On ELF a data mapping symbol is inserted to
   cover any zero padding.  */

void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [architecture variant][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the &= presumably assumes MAX_MEM_FOR_RS_ALIGN_CODE
     is one less than a power of two — confirm its definition.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  Thumb-2 capable targets get a narrow noop for an
	 odd halfword count plus wide noops for the rest.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad with zero bytes up to a noop boundary, marking them as data on
     ELF so disassemblers do not decode the padding.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
18811
18812 /* Called from md_do_align. Used to create an alignment
18813 frag in a code section. */
18814
18815 void
18816 arm_frag_align_code (int n, int max)
18817 {
18818 char * p;
18819
18820 /* We assume that there will never be a requirement
18821 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18822 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
18823 {
18824 char err_msg[128];
18825
18826 sprintf (err_msg,
18827 _("alignments greater than %d bytes not supported in .text sections."),
18828 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
18829 as_fatal ("%s", err_msg);
18830 }
18831
18832 p = frag_var (rs_align_code,
18833 MAX_MEM_FOR_RS_ALIGN_CODE,
18834 1,
18835 (relax_substateT) max,
18836 (symbolS *) NULL,
18837 (offsetT) n,
18838 (char *) NULL);
18839 *p = 0;
18840 }
18841
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
18855
18856 #else /* OBJ_ELF is defined. */
/* ELF variant: as well as recording the ARM/Thumb mode, emit a mapping
   symbol ($a/$t/$d) for frags that may need one; MAX_CHARS is the
   worst-case size of the frag, used to place the symbol.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    {
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
	 later if the alignment ends up empty.  */
      switch (fragP->fr_type)
	{
	case rs_align:
	case rs_align_test:
	case rs_fill:
	  /* Alignment padding and fills are data, not code.  */
	  mapping_state_2 (MAP_DATA, max_chars);
	  break;
	case rs_align_code:
	  /* Code alignment is filled with noops in the current mode.  */
	  mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
	  break;
	default:
	  break;
	}
    }
}
18883
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
18894
18895 int
18896 arm_elf_section_type (const char * str, size_t len)
18897 {
18898 if (len == 5 && strncmp (str, "exidx", 5) == 0)
18899 return SHT_ARM_EXIDX;
18900
18901 return -1;
18902 }
18903 \f
18904 /* Code to deal with unwinding tables. */
18905
18906 static void add_unwind_adjustsp (offsetT);
18907
/* Generate any deferred unwind frame offset.  Stack-pointer
   adjustments are accumulated in unwind.pending_offset so consecutive
   pushes can be merged into a single opcode; this emits the pending
   amount, if any, and resets it.  */

static void
flush_pending_unwind (void)
{
  offsetT offset;

  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
18920
18921 /* Add an opcode to this list for this function. Two-byte opcodes should
18922 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
18923 order. */
18924
18925 static void
18926 add_unwind_opcode (valueT op, int length)
18927 {
18928 /* Add any deferred stack adjustment. */
18929 if (unwind.pending_offset)
18930 flush_pending_unwind ();
18931
18932 unwind.sp_restored = 0;
18933
18934 if (unwind.opcode_count + length > unwind.opcode_alloc)
18935 {
18936 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
18937 if (unwind.opcodes)
18938 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
18939 unwind.opcode_alloc);
18940 else
18941 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
18942 }
18943 while (length > 0)
18944 {
18945 length--;
18946 unwind.opcodes[unwind.opcode_count] = op & 0xff;
18947 op >>= 8;
18948 unwind.opcode_count++;
18949 }
18950 }
18951
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes
   (positive = deallocate, negative = allocate).  Chooses the shortest
   EHABI encoding: a single short opcode, a pair, the long 0xb2+uleb128
   form, or repeated 0x7f opcodes for large negative adjustments.
   Remember the opcode list is built in reverse order.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes go in last-first so they end
	 up in the right order once the list is reversed.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f ("sp -= 0x100") as many times
	 as needed, then one opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
19013
/* Finish the list of unwind opcodes for this function: flush any
   deferred stack adjustment and, when a frame pointer was used, emit
   the "restore sp from FP register" opcode last (i.e. first at
   unwind time, since the list is reversed).  */
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
19033
19034
/* Start an exception table entry.  Switches to (creating if necessary)
   the unwind section paired with TEXT_SEG: the index table
   (.ARM.exidx*) if IDX is nonzero, otherwise the unwind info table
   (.ARM.extab*).  Linkonce and COMDAT-group text sections get
   correspondingly grouped unwind sections.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section name,
     with a plain ".text" contributing no suffix at all.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
19110
19111
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry (when everything fits in the index table word itself,
   or for EXIDX_CANTUNWIND).  Otherwise the full entry is emitted into
   the unwind section and unwind.table_entry marks its start.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Reverse the opcode list into the 24 payload bits.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  /* SIZE becomes the number of additional 32-bit words needed.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
19269
19270
/* Initialize the DWARF-2 unwind information for this procedure:
   at function entry the CFA is SP with zero offset.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
19278 #endif /* OBJ_ELF */
19279
19280 /* Convert REGNAME to a DWARF-2 register number. */
19281
19282 int
19283 tc_arm_regname_to_dw2regnum (char *regname)
19284 {
19285 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
19286
19287 if (reg == FAIL)
19288 return -1;
19289
19290 return reg;
19291 }
19292
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (O_secrel) reference to SYMBOL,
   used for DWARF-2 offsets in PE output.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* NOTE(review): only these three fields are set; presumably
     emit_expr reads nothing else for an O_secrel expression —
     confirm before reordering or extending.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
19305
19306 /* MD interface: Symbol and relocation handling. */
19307
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a local, non-external ARM function reachable with v5T
	 interworking, resolve against the real address (base was
	 zeroed above for the emit-a-reloc case).  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (!S_IS_EXTERNAL (fixP->fx_addsy))
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
19435
19436 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19437 Otherwise we have no need to default values of symbols. */
19438
19439 symbolS *
19440 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
19441 {
19442 #ifdef OBJ_ELF
19443 if (name[0] == '_' && name[1] == 'G'
19444 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
19445 {
19446 if (!GOT_symbol)
19447 {
19448 if (symbol_find (name))
19449 as_bad (_("GOT already in the symbol table"));
19450
19451 GOT_symbol = symbol_new (name, undefined_section,
19452 (valueT) 0, & zero_address_frag);
19453 }
19454
19455 return GOT_symbol;
19456 }
19457 #endif
19458
19459 return NULL;
19460 }
19461
19462 /* Subroutine of md_apply_fix. Check to see if an immediate can be
19463 computed as two separate immediate values, added together. We
19464 already know that this value cannot be computed by just one ARM
19465 instruction. */
19466
19467 static unsigned int
19468 validate_immediate_twopart (unsigned int val,
19469 unsigned int * highpart)
19470 {
19471 unsigned int a;
19472 unsigned int i;
19473
19474 for (i = 0; i < 32; i += 2)
19475 if (((a = rotate_left (val, i)) & 0xff) != 0)
19476 {
19477 if (a & 0xff00)
19478 {
19479 if (a & ~ 0xffff)
19480 continue;
19481 * highpart = (a >> 8) | ((i + 24) << 7);
19482 }
19483 else if (a & 0xff0000)
19484 {
19485 if (a & 0xff000000)
19486 continue;
19487 * highpart = (a >> 16) | ((i + 16) << 7);
19488 }
19489 else
19490 {
19491 gas_assert (a & 0xff000000);
19492 * highpart = (a >> 24) | ((i + 8) << 7);
19493 }
19494
19495 return (a & 0xff) | (i << 7);
19496 }
19497
19498 return FAIL;
19499 }
19500
19501 static int
19502 validate_offset_imm (unsigned int val, int hwse)
19503 {
19504 if ((hwse && val > 255) || val > 4095)
19505 return FAIL;
19506 return val;
19507 }
19508
19509 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19510 negative immediate constant by altering the instruction. A bit of
19511 a hack really.
19512 MOV <-> MVN
19513 AND <-> BIC
19514 ADC <-> SBC
19515 by inverting the second operand, and
19516 ADD <-> SUB
19517 CMP <-> CMN
19518 by negating the second operand. */
19519
19520 static int
19521 negate_data_op (unsigned long * instruction,
19522 unsigned long value)
19523 {
19524 int op, new_inst;
19525 unsigned long negated, inverted;
19526
19527 negated = encode_arm_immediate (-value);
19528 inverted = encode_arm_immediate (~value);
19529
19530 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
19531 switch (op)
19532 {
19533 /* First negates. */
19534 case OPCODE_SUB: /* ADD <-> SUB */
19535 new_inst = OPCODE_ADD;
19536 value = negated;
19537 break;
19538
19539 case OPCODE_ADD:
19540 new_inst = OPCODE_SUB;
19541 value = negated;
19542 break;
19543
19544 case OPCODE_CMP: /* CMP <-> CMN */
19545 new_inst = OPCODE_CMN;
19546 value = negated;
19547 break;
19548
19549 case OPCODE_CMN:
19550 new_inst = OPCODE_CMP;
19551 value = negated;
19552 break;
19553
19554 /* Now Inverted ops. */
19555 case OPCODE_MOV: /* MOV <-> MVN */
19556 new_inst = OPCODE_MVN;
19557 value = inverted;
19558 break;
19559
19560 case OPCODE_MVN:
19561 new_inst = OPCODE_MOV;
19562 value = inverted;
19563 break;
19564
19565 case OPCODE_AND: /* AND <-> BIC */
19566 new_inst = OPCODE_BIC;
19567 value = inverted;
19568 break;
19569
19570 case OPCODE_BIC:
19571 new_inst = OPCODE_AND;
19572 value = inverted;
19573 break;
19574
19575 case OPCODE_ADC: /* ADC <-> SBC */
19576 new_inst = OPCODE_SBC;
19577 value = inverted;
19578 break;
19579
19580 case OPCODE_SBC:
19581 new_inst = OPCODE_ADC;
19582 value = inverted;
19583 break;
19584
19585 /* We cannot do anything. */
19586 default:
19587 return FAIL;
19588 }
19589
19590 if (value == (unsigned) FAIL)
19591 return FAIL;
19592
19593 *instruction &= OPCODE_MASK;
19594 *instruction |= new_inst << DATA_OP_SHIFT;
19595 return value;
19596 }
19597
19598 /* Like negate_data_op, but for Thumb-2. */
19599
19600 static unsigned int
19601 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
19602 {
19603 int op, new_inst;
19604 int rd;
19605 unsigned int negated, inverted;
19606
19607 negated = encode_thumb32_immediate (-value);
19608 inverted = encode_thumb32_immediate (~value);
19609
19610 rd = (*instruction >> 8) & 0xf;
19611 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
19612 switch (op)
19613 {
19614 /* ADD <-> SUB. Includes CMP <-> CMN. */
19615 case T2_OPCODE_SUB:
19616 new_inst = T2_OPCODE_ADD;
19617 value = negated;
19618 break;
19619
19620 case T2_OPCODE_ADD:
19621 new_inst = T2_OPCODE_SUB;
19622 value = negated;
19623 break;
19624
19625 /* ORR <-> ORN. Includes MOV <-> MVN. */
19626 case T2_OPCODE_ORR:
19627 new_inst = T2_OPCODE_ORN;
19628 value = inverted;
19629 break;
19630
19631 case T2_OPCODE_ORN:
19632 new_inst = T2_OPCODE_ORR;
19633 value = inverted;
19634 break;
19635
19636 /* AND <-> BIC. TST has no inverted equivalent. */
19637 case T2_OPCODE_AND:
19638 new_inst = T2_OPCODE_BIC;
19639 if (rd == 15)
19640 value = FAIL;
19641 else
19642 value = inverted;
19643 break;
19644
19645 case T2_OPCODE_BIC:
19646 new_inst = T2_OPCODE_AND;
19647 value = inverted;
19648 break;
19649
19650 /* ADC <-> SBC */
19651 case T2_OPCODE_ADC:
19652 new_inst = T2_OPCODE_SBC;
19653 value = inverted;
19654 break;
19655
19656 case T2_OPCODE_SBC:
19657 new_inst = T2_OPCODE_ADC;
19658 value = inverted;
19659 break;
19660
19661 /* We cannot do anything. */
19662 default:
19663 return FAIL;
19664 }
19665
19666 if (value == (unsigned int)FAIL)
19667 return FAIL;
19668
19669 *instruction &= T2_OPCODE_MASK;
19670 *instruction |= new_inst << T2_DATA_OP_SHIFT;
19671 return value;
19672 }
19673
19674 /* Read a 32-bit thumb instruction from buf. */
19675 static unsigned long
19676 get_thumb32_insn (char * buf)
19677 {
19678 unsigned long insn;
19679 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
19680 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19681
19682 return insn;
19683 }
19684
19685
19686 /* We usually want to set the low bit on the address of thumb function
19687 symbols. In particular .word foo - . should have the low bit set.
19688 Generic code tries to fold the difference of two symbols to
19689 a constant. Prevent this and force a relocation when the first symbols
19690 is a thumb function. */
19691
19692 bfd_boolean
19693 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
19694 {
19695 if (op == O_subtract
19696 && l->X_op == O_symbol
19697 && r->X_op == O_symbol
19698 && THUMB_IS_FUNC (l->X_add_symbol))
19699 {
19700 l->X_op = O_subtract;
19701 l->X_op_symbol = r->X_add_symbol;
19702 l->X_add_number -= r->X_add_number;
19703 return TRUE;
19704 }
19705
19706 /* Process as normal. */
19707 return FALSE;
19708 }
19709
19710 /* Encode Thumb2 unconditional branches and calls. The encoding
19711 for the 2 are identical for the immediate values. */
19712
19713 static void
19714 encode_thumb2_b_bl_offset (char * buf, offsetT value)
19715 {
19716 #define T2I1I2MASK ((1 << 13) | (1 << 11))
19717 offsetT newval;
19718 offsetT newval2;
19719 addressT S, I1, I2, lo, hi;
19720
19721 S = (value >> 24) & 0x01;
19722 I1 = (value >> 23) & 0x01;
19723 I2 = (value >> 22) & 0x01;
19724 hi = (value >> 12) & 0x3ff;
19725 lo = (value >> 1) & 0x7ff;
19726 newval = md_chars_to_number (buf, THUMB_SIZE);
19727 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19728 newval |= (S << 10) | hi;
19729 newval2 &= ~T2I1I2MASK;
19730 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
19731 md_number_to_chars (buf, newval, THUMB_SIZE);
19732 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19733 }
19734
19735 void
19736 md_apply_fix (fixS * fixP,
19737 valueT * valP,
19738 segT seg)
19739 {
19740 offsetT value = * valP;
19741 offsetT newval;
19742 unsigned int newimm;
19743 unsigned long temp;
19744 int sign;
19745 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
19746
19747 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
19748
19749 /* Note whether this will delete the relocation. */
19750
19751 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
19752 fixP->fx_done = 1;
19753
19754 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19755 consistency with the behaviour on 32-bit hosts. Remember value
19756 for emit_reloc. */
19757 value &= 0xffffffff;
19758 value ^= 0x80000000;
19759 value -= 0x80000000;
19760
19761 *valP = value;
19762 fixP->fx_addnumber = value;
19763
19764 /* Same treatment for fixP->fx_offset. */
19765 fixP->fx_offset &= 0xffffffff;
19766 fixP->fx_offset ^= 0x80000000;
19767 fixP->fx_offset -= 0x80000000;
19768
19769 switch (fixP->fx_r_type)
19770 {
19771 case BFD_RELOC_NONE:
19772 /* This will need to go in the object file. */
19773 fixP->fx_done = 0;
19774 break;
19775
19776 case BFD_RELOC_ARM_IMMEDIATE:
19777 /* We claim that this fixup has been processed here,
19778 even if in fact we generate an error because we do
19779 not have a reloc for it, so tc_gen_reloc will reject it. */
19780 fixP->fx_done = 1;
19781
19782 if (fixP->fx_addsy
19783 && ! S_IS_DEFINED (fixP->fx_addsy))
19784 {
19785 as_bad_where (fixP->fx_file, fixP->fx_line,
19786 _("undefined symbol %s used as an immediate value"),
19787 S_GET_NAME (fixP->fx_addsy));
19788 break;
19789 }
19790
19791 if (fixP->fx_addsy
19792 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19793 {
19794 as_bad_where (fixP->fx_file, fixP->fx_line,
19795 _("symbol %s is in a different section"),
19796 S_GET_NAME (fixP->fx_addsy));
19797 break;
19798 }
19799
19800 newimm = encode_arm_immediate (value);
19801 temp = md_chars_to_number (buf, INSN_SIZE);
19802
19803 /* If the instruction will fail, see if we can fix things up by
19804 changing the opcode. */
19805 if (newimm == (unsigned int) FAIL
19806 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
19807 {
19808 as_bad_where (fixP->fx_file, fixP->fx_line,
19809 _("invalid constant (%lx) after fixup"),
19810 (unsigned long) value);
19811 break;
19812 }
19813
19814 newimm |= (temp & 0xfffff000);
19815 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19816 break;
19817
19818 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19819 {
19820 unsigned int highpart = 0;
19821 unsigned int newinsn = 0xe1a00000; /* nop. */
19822
19823 if (fixP->fx_addsy
19824 && ! S_IS_DEFINED (fixP->fx_addsy))
19825 {
19826 as_bad_where (fixP->fx_file, fixP->fx_line,
19827 _("undefined symbol %s used as an immediate value"),
19828 S_GET_NAME (fixP->fx_addsy));
19829 break;
19830 }
19831
19832 if (fixP->fx_addsy
19833 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19834 {
19835 as_bad_where (fixP->fx_file, fixP->fx_line,
19836 _("symbol %s is in a different section"),
19837 S_GET_NAME (fixP->fx_addsy));
19838 break;
19839 }
19840
19841 newimm = encode_arm_immediate (value);
19842 temp = md_chars_to_number (buf, INSN_SIZE);
19843
19844 /* If the instruction will fail, see if we can fix things up by
19845 changing the opcode. */
19846 if (newimm == (unsigned int) FAIL
19847 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
19848 {
19849 /* No ? OK - try using two ADD instructions to generate
19850 the value. */
19851 newimm = validate_immediate_twopart (value, & highpart);
19852
19853 /* Yes - then make sure that the second instruction is
19854 also an add. */
19855 if (newimm != (unsigned int) FAIL)
19856 newinsn = temp;
19857 /* Still No ? Try using a negated value. */
19858 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
19859 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
19860 /* Otherwise - give up. */
19861 else
19862 {
19863 as_bad_where (fixP->fx_file, fixP->fx_line,
19864 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19865 (long) value);
19866 break;
19867 }
19868
19869 /* Replace the first operand in the 2nd instruction (which
19870 is the PC) with the destination register. We have
19871 already added in the PC in the first instruction and we
19872 do not want to do it again. */
19873 newinsn &= ~ 0xf0000;
19874 newinsn |= ((newinsn & 0x0f000) << 4);
19875 }
19876
19877 newimm |= (temp & 0xfffff000);
19878 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19879
19880 highpart |= (newinsn & 0xfffff000);
19881 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
19882 }
19883 break;
19884
19885 case BFD_RELOC_ARM_OFFSET_IMM:
19886 if (!fixP->fx_done && seg->use_rela_p)
19887 value = 0;
19888
19889 case BFD_RELOC_ARM_LITERAL:
19890 sign = value >= 0;
19891
19892 if (value < 0)
19893 value = - value;
19894
19895 if (validate_offset_imm (value, 0) == FAIL)
19896 {
19897 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
19898 as_bad_where (fixP->fx_file, fixP->fx_line,
19899 _("invalid literal constant: pool needs to be closer"));
19900 else
19901 as_bad_where (fixP->fx_file, fixP->fx_line,
19902 _("bad immediate value for offset (%ld)"),
19903 (long) value);
19904 break;
19905 }
19906
19907 newval = md_chars_to_number (buf, INSN_SIZE);
19908 newval &= 0xff7ff000;
19909 newval |= value | (sign ? INDEX_UP : 0);
19910 md_number_to_chars (buf, newval, INSN_SIZE);
19911 break;
19912
19913 case BFD_RELOC_ARM_OFFSET_IMM8:
19914 case BFD_RELOC_ARM_HWLITERAL:
19915 sign = value >= 0;
19916
19917 if (value < 0)
19918 value = - value;
19919
19920 if (validate_offset_imm (value, 1) == FAIL)
19921 {
19922 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
19923 as_bad_where (fixP->fx_file, fixP->fx_line,
19924 _("invalid literal constant: pool needs to be closer"));
19925 else
19926 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19927 (long) value);
19928 break;
19929 }
19930
19931 newval = md_chars_to_number (buf, INSN_SIZE);
19932 newval &= 0xff7ff0f0;
19933 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
19934 md_number_to_chars (buf, newval, INSN_SIZE);
19935 break;
19936
19937 case BFD_RELOC_ARM_T32_OFFSET_U8:
19938 if (value < 0 || value > 1020 || value % 4 != 0)
19939 as_bad_where (fixP->fx_file, fixP->fx_line,
19940 _("bad immediate value for offset (%ld)"), (long) value);
19941 value /= 4;
19942
19943 newval = md_chars_to_number (buf+2, THUMB_SIZE);
19944 newval |= value;
19945 md_number_to_chars (buf+2, newval, THUMB_SIZE);
19946 break;
19947
19948 case BFD_RELOC_ARM_T32_OFFSET_IMM:
19949 /* This is a complicated relocation used for all varieties of Thumb32
19950 load/store instruction with immediate offset:
19951
19952 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
19953 *4, optional writeback(W)
19954 (doubleword load/store)
19955
19956 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
19957 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
19958 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
19959 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
19960 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
19961
19962 Uppercase letters indicate bits that are already encoded at
19963 this point. Lowercase letters are our problem. For the
19964 second block of instructions, the secondary opcode nybble
19965 (bits 8..11) is present, and bit 23 is zero, even if this is
19966 a PC-relative operation. */
19967 newval = md_chars_to_number (buf, THUMB_SIZE);
19968 newval <<= 16;
19969 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
19970
19971 if ((newval & 0xf0000000) == 0xe0000000)
19972 {
19973 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19974 if (value >= 0)
19975 newval |= (1 << 23);
19976 else
19977 value = -value;
19978 if (value % 4 != 0)
19979 {
19980 as_bad_where (fixP->fx_file, fixP->fx_line,
19981 _("offset not a multiple of 4"));
19982 break;
19983 }
19984 value /= 4;
19985 if (value > 0xff)
19986 {
19987 as_bad_where (fixP->fx_file, fixP->fx_line,
19988 _("offset out of range"));
19989 break;
19990 }
19991 newval &= ~0xff;
19992 }
19993 else if ((newval & 0x000f0000) == 0x000f0000)
19994 {
19995 /* PC-relative, 12-bit offset. */
19996 if (value >= 0)
19997 newval |= (1 << 23);
19998 else
19999 value = -value;
20000 if (value > 0xfff)
20001 {
20002 as_bad_where (fixP->fx_file, fixP->fx_line,
20003 _("offset out of range"));
20004 break;
20005 }
20006 newval &= ~0xfff;
20007 }
20008 else if ((newval & 0x00000100) == 0x00000100)
20009 {
20010 /* Writeback: 8-bit, +/- offset. */
20011 if (value >= 0)
20012 newval |= (1 << 9);
20013 else
20014 value = -value;
20015 if (value > 0xff)
20016 {
20017 as_bad_where (fixP->fx_file, fixP->fx_line,
20018 _("offset out of range"));
20019 break;
20020 }
20021 newval &= ~0xff;
20022 }
20023 else if ((newval & 0x00000f00) == 0x00000e00)
20024 {
20025 /* T-instruction: positive 8-bit offset. */
20026 if (value < 0 || value > 0xff)
20027 {
20028 as_bad_where (fixP->fx_file, fixP->fx_line,
20029 _("offset out of range"));
20030 break;
20031 }
20032 newval &= ~0xff;
20033 newval |= value;
20034 }
20035 else
20036 {
20037 /* Positive 12-bit or negative 8-bit offset. */
20038 int limit;
20039 if (value >= 0)
20040 {
20041 newval |= (1 << 23);
20042 limit = 0xfff;
20043 }
20044 else
20045 {
20046 value = -value;
20047 limit = 0xff;
20048 }
20049 if (value > limit)
20050 {
20051 as_bad_where (fixP->fx_file, fixP->fx_line,
20052 _("offset out of range"));
20053 break;
20054 }
20055 newval &= ~limit;
20056 }
20057
20058 newval |= value;
20059 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20060 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20061 break;
20062
20063 case BFD_RELOC_ARM_SHIFT_IMM:
20064 newval = md_chars_to_number (buf, INSN_SIZE);
20065 if (((unsigned long) value) > 32
20066 || (value == 32
20067 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20068 {
20069 as_bad_where (fixP->fx_file, fixP->fx_line,
20070 _("shift expression is too large"));
20071 break;
20072 }
20073
20074 if (value == 0)
20075 /* Shifts of zero must be done as lsl. */
20076 newval &= ~0x60;
20077 else if (value == 32)
20078 value = 0;
20079 newval &= 0xfffff07f;
20080 newval |= (value & 0x1f) << 7;
20081 md_number_to_chars (buf, newval, INSN_SIZE);
20082 break;
20083
20084 case BFD_RELOC_ARM_T32_IMMEDIATE:
20085 case BFD_RELOC_ARM_T32_ADD_IMM:
20086 case BFD_RELOC_ARM_T32_IMM12:
20087 case BFD_RELOC_ARM_T32_ADD_PC12:
20088 /* We claim that this fixup has been processed here,
20089 even if in fact we generate an error because we do
20090 not have a reloc for it, so tc_gen_reloc will reject it. */
20091 fixP->fx_done = 1;
20092
20093 if (fixP->fx_addsy
20094 && ! S_IS_DEFINED (fixP->fx_addsy))
20095 {
20096 as_bad_where (fixP->fx_file, fixP->fx_line,
20097 _("undefined symbol %s used as an immediate value"),
20098 S_GET_NAME (fixP->fx_addsy));
20099 break;
20100 }
20101
20102 newval = md_chars_to_number (buf, THUMB_SIZE);
20103 newval <<= 16;
20104 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20105
20106 newimm = FAIL;
20107 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20108 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20109 {
20110 newimm = encode_thumb32_immediate (value);
20111 if (newimm == (unsigned int) FAIL)
20112 newimm = thumb32_negate_data_op (&newval, value);
20113 }
20114 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20115 && newimm == (unsigned int) FAIL)
20116 {
	  /* Turn add/sub into addw/subw.  */
20118 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20119 newval = (newval & 0xfeffffff) | 0x02000000;
20120
20121 /* 12 bit immediate for addw/subw. */
20122 if (value < 0)
20123 {
20124 value = -value;
20125 newval ^= 0x00a00000;
20126 }
20127 if (value > 0xfff)
20128 newimm = (unsigned int) FAIL;
20129 else
20130 newimm = value;
20131 }
20132
20133 if (newimm == (unsigned int)FAIL)
20134 {
20135 as_bad_where (fixP->fx_file, fixP->fx_line,
20136 _("invalid constant (%lx) after fixup"),
20137 (unsigned long) value);
20138 break;
20139 }
20140
20141 newval |= (newimm & 0x800) << 15;
20142 newval |= (newimm & 0x700) << 4;
20143 newval |= (newimm & 0x0ff);
20144
20145 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20146 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20147 break;
20148
20149 case BFD_RELOC_ARM_SMC:
20150 if (((unsigned long) value) > 0xffff)
20151 as_bad_where (fixP->fx_file, fixP->fx_line,
20152 _("invalid smc expression"));
20153 newval = md_chars_to_number (buf, INSN_SIZE);
20154 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20155 md_number_to_chars (buf, newval, INSN_SIZE);
20156 break;
20157
20158 case BFD_RELOC_ARM_SWI:
20159 if (fixP->tc_fix_data != 0)
20160 {
20161 if (((unsigned long) value) > 0xff)
20162 as_bad_where (fixP->fx_file, fixP->fx_line,
20163 _("invalid swi expression"));
20164 newval = md_chars_to_number (buf, THUMB_SIZE);
20165 newval |= value;
20166 md_number_to_chars (buf, newval, THUMB_SIZE);
20167 }
20168 else
20169 {
20170 if (((unsigned long) value) > 0x00ffffff)
20171 as_bad_where (fixP->fx_file, fixP->fx_line,
20172 _("invalid swi expression"));
20173 newval = md_chars_to_number (buf, INSN_SIZE);
20174 newval |= value;
20175 md_number_to_chars (buf, newval, INSN_SIZE);
20176 }
20177 break;
20178
20179 case BFD_RELOC_ARM_MULTI:
20180 if (((unsigned long) value) > 0xffff)
20181 as_bad_where (fixP->fx_file, fixP->fx_line,
20182 _("invalid expression in load/store multiple"));
20183 newval = value | md_chars_to_number (buf, INSN_SIZE);
20184 md_number_to_chars (buf, newval, INSN_SIZE);
20185 break;
20186
20187 #ifdef OBJ_ELF
20188 case BFD_RELOC_ARM_PCREL_CALL:
20189
20190 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20191 && fixP->fx_addsy
20192 && !S_IS_EXTERNAL (fixP->fx_addsy)
20193 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20194 && THUMB_IS_FUNC (fixP->fx_addsy))
20195 /* Flip the bl to blx. This is a simple flip
20196 bit here because we generate PCREL_CALL for
20197 unconditional bls. */
20198 {
20199 newval = md_chars_to_number (buf, INSN_SIZE);
20200 newval = newval | 0x10000000;
20201 md_number_to_chars (buf, newval, INSN_SIZE);
20202 temp = 1;
20203 fixP->fx_done = 1;
20204 }
20205 else
20206 temp = 3;
20207 goto arm_branch_common;
20208
20209 case BFD_RELOC_ARM_PCREL_JUMP:
20210 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20211 && fixP->fx_addsy
20212 && !S_IS_EXTERNAL (fixP->fx_addsy)
20213 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20214 && THUMB_IS_FUNC (fixP->fx_addsy))
20215 {
20216 /* This would map to a bl<cond>, b<cond>,
20217 b<always> to a Thumb function. We
20218 need to force a relocation for this particular
20219 case. */
20220 newval = md_chars_to_number (buf, INSN_SIZE);
20221 fixP->fx_done = 0;
20222 }
20223
20224 case BFD_RELOC_ARM_PLT32:
20225 #endif
20226 case BFD_RELOC_ARM_PCREL_BRANCH:
20227 temp = 3;
20228 goto arm_branch_common;
20229
20230 case BFD_RELOC_ARM_PCREL_BLX:
20231
20232 temp = 1;
20233 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20234 && fixP->fx_addsy
20235 && !S_IS_EXTERNAL (fixP->fx_addsy)
20236 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20237 && ARM_IS_FUNC (fixP->fx_addsy))
20238 {
20239 /* Flip the blx to a bl and warn. */
20240 const char *name = S_GET_NAME (fixP->fx_addsy);
20241 newval = 0xeb000000;
20242 as_warn_where (fixP->fx_file, fixP->fx_line,
20243 _("blx to '%s' an ARM ISA state function changed to bl"),
20244 name);
20245 md_number_to_chars (buf, newval, INSN_SIZE);
20246 temp = 3;
20247 fixP->fx_done = 1;
20248 }
20249
20250 #ifdef OBJ_ELF
20251 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20252 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20253 #endif
20254
20255 arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
20260 if (value & temp)
20261 as_bad_where (fixP->fx_file, fixP->fx_line,
20262 _("misaligned branch destination"));
20263 if ((value & (offsetT)0xfe000000) != (offsetT)0
20264 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
20265 as_bad_where (fixP->fx_file, fixP->fx_line,
20266 _("branch out of range"));
20267
20268 if (fixP->fx_done || !seg->use_rela_p)
20269 {
20270 newval = md_chars_to_number (buf, INSN_SIZE);
20271 newval |= (value >> 2) & 0x00ffffff;
20272 /* Set the H bit on BLX instructions. */
20273 if (temp == 1)
20274 {
20275 if (value & 2)
20276 newval |= 0x01000000;
20277 else
20278 newval &= ~0x01000000;
20279 }
20280 md_number_to_chars (buf, newval, INSN_SIZE);
20281 }
20282 break;
20283
20284 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
20285 /* CBZ can only branch forward. */
20286
20287 /* Attempts to use CBZ to branch to the next instruction
20288 (which, strictly speaking, are prohibited) will be turned into
20289 no-ops.
20290
20291 FIXME: It may be better to remove the instruction completely and
20292 perform relaxation. */
20293 if (value == -2)
20294 {
20295 newval = md_chars_to_number (buf, THUMB_SIZE);
20296 newval = 0xbf00; /* NOP encoding T1 */
20297 md_number_to_chars (buf, newval, THUMB_SIZE);
20298 }
20299 else
20300 {
20301 if (value & ~0x7e)
20302 as_bad_where (fixP->fx_file, fixP->fx_line,
20303 _("branch out of range"));
20304
20305 if (fixP->fx_done || !seg->use_rela_p)
20306 {
20307 newval = md_chars_to_number (buf, THUMB_SIZE);
20308 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
20309 md_number_to_chars (buf, newval, THUMB_SIZE);
20310 }
20311 }
20312 break;
20313
20314 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
20315 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
20316 as_bad_where (fixP->fx_file, fixP->fx_line,
20317 _("branch out of range"));
20318
20319 if (fixP->fx_done || !seg->use_rela_p)
20320 {
20321 newval = md_chars_to_number (buf, THUMB_SIZE);
20322 newval |= (value & 0x1ff) >> 1;
20323 md_number_to_chars (buf, newval, THUMB_SIZE);
20324 }
20325 break;
20326
20327 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
20328 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
20329 as_bad_where (fixP->fx_file, fixP->fx_line,
20330 _("branch out of range"));
20331
20332 if (fixP->fx_done || !seg->use_rela_p)
20333 {
20334 newval = md_chars_to_number (buf, THUMB_SIZE);
20335 newval |= (value & 0xfff) >> 1;
20336 md_number_to_chars (buf, newval, THUMB_SIZE);
20337 }
20338 break;
20339
20340 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20341 if (fixP->fx_addsy
20342 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20343 && !S_IS_EXTERNAL (fixP->fx_addsy)
20344 && S_IS_DEFINED (fixP->fx_addsy)
20345 && ARM_IS_FUNC (fixP->fx_addsy)
20346 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20347 {
20348 /* Force a relocation for a branch 20 bits wide. */
20349 fixP->fx_done = 0;
20350 }
20351 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
20352 as_bad_where (fixP->fx_file, fixP->fx_line,
20353 _("conditional branch out of range"));
20354
20355 if (fixP->fx_done || !seg->use_rela_p)
20356 {
20357 offsetT newval2;
20358 addressT S, J1, J2, lo, hi;
20359
20360 S = (value & 0x00100000) >> 20;
20361 J2 = (value & 0x00080000) >> 19;
20362 J1 = (value & 0x00040000) >> 18;
20363 hi = (value & 0x0003f000) >> 12;
20364 lo = (value & 0x00000ffe) >> 1;
20365
20366 newval = md_chars_to_number (buf, THUMB_SIZE);
20367 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20368 newval |= (S << 10) | hi;
20369 newval2 |= (J1 << 13) | (J2 << 11) | lo;
20370 md_number_to_chars (buf, newval, THUMB_SIZE);
20371 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20372 }
20373 break;
20374
20375 case BFD_RELOC_THUMB_PCREL_BLX:
20376
20377 /* If there is a blx from a thumb state function to
20378 another thumb function flip this to a bl and warn
20379 about it. */
20380
20381 if (fixP->fx_addsy
20382 && S_IS_DEFINED (fixP->fx_addsy)
20383 && !S_IS_EXTERNAL (fixP->fx_addsy)
20384 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20385 && THUMB_IS_FUNC (fixP->fx_addsy))
20386 {
20387 const char *name = S_GET_NAME (fixP->fx_addsy);
20388 as_warn_where (fixP->fx_file, fixP->fx_line,
20389 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20390 name);
20391 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20392 newval = newval | 0x1000;
20393 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20394 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20395 fixP->fx_done = 1;
20396 }
20397
20398
20399 goto thumb_bl_common;
20400
20401 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20402
20403 /* A bl from Thumb state ISA to an internal ARM state function
20404 is converted to a blx. */
20405 if (fixP->fx_addsy
20406 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20407 && !S_IS_EXTERNAL (fixP->fx_addsy)
20408 && S_IS_DEFINED (fixP->fx_addsy)
20409 && ARM_IS_FUNC (fixP->fx_addsy)
20410 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20411 {
20412 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20413 newval = newval & ~0x1000;
20414 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20415 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
20416 fixP->fx_done = 1;
20417 }
20418
20419 thumb_bl_common:
20420
20421 #ifdef OBJ_ELF
20422 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
20423 fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20424 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20425 #endif
20426
20427 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20428 /* For a BLX instruction, make sure that the relocation is rounded up
20429 to a word boundary. This follows the semantics of the instruction
20430 which specifies that bit 1 of the target address will come from bit
20431 1 of the base address. */
20432 value = (value + 1) & ~ 1;
20433
20434
20435 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
20436 {
20437 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
20438 {
20439 as_bad_where (fixP->fx_file, fixP->fx_line,
20440 _("branch out of range"));
20441 }
20442 else if ((value & ~0x1ffffff)
20443 && ((value & ~0x1ffffff) != ~0x1ffffff))
20444 {
20445 as_bad_where (fixP->fx_file, fixP->fx_line,
20446 _("Thumb2 branch out of range"));
20447 }
20448 }
20449
20450 if (fixP->fx_done || !seg->use_rela_p)
20451 encode_thumb2_b_bl_offset (buf, value);
20452
20453 break;
20454
20455 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20456 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
20457 as_bad_where (fixP->fx_file, fixP->fx_line,
20458 _("branch out of range"));
20459
20460 if (fixP->fx_done || !seg->use_rela_p)
20461 encode_thumb2_b_bl_offset (buf, value);
20462
20463 break;
20464
20465 case BFD_RELOC_8:
20466 if (fixP->fx_done || !seg->use_rela_p)
20467 md_number_to_chars (buf, value, 1);
20468 break;
20469
20470 case BFD_RELOC_16:
20471 if (fixP->fx_done || !seg->use_rela_p)
20472 md_number_to_chars (buf, value, 2);
20473 break;
20474
20475 #ifdef OBJ_ELF
20476 case BFD_RELOC_ARM_TLS_GD32:
20477 case BFD_RELOC_ARM_TLS_LE32:
20478 case BFD_RELOC_ARM_TLS_IE32:
20479 case BFD_RELOC_ARM_TLS_LDM32:
20480 case BFD_RELOC_ARM_TLS_LDO32:
20481 S_SET_THREAD_LOCAL (fixP->fx_addsy);
20482 /* fall through */
20483
20484 case BFD_RELOC_ARM_GOT32:
20485 case BFD_RELOC_ARM_GOTOFF:
20486 if (fixP->fx_done || !seg->use_rela_p)
20487 md_number_to_chars (buf, 0, 4);
20488 break;
20489
20490 case BFD_RELOC_ARM_GOT_PREL:
20491 if (fixP->fx_done || !seg->use_rela_p)
20492 md_number_to_chars (buf, value, 4);
20493 break;
20494
20495 case BFD_RELOC_ARM_TARGET2:
20496 /* TARGET2 is not partial-inplace, so we need to write the
20497 addend here for REL targets, because it won't be written out
20498 during reloc processing later. */
20499 if (fixP->fx_done || !seg->use_rela_p)
20500 md_number_to_chars (buf, fixP->fx_offset, 4);
20501 break;
20502 #endif
20503
20504 case BFD_RELOC_RVA:
20505 case BFD_RELOC_32:
20506 case BFD_RELOC_ARM_TARGET1:
20507 case BFD_RELOC_ARM_ROSEGREL32:
20508 case BFD_RELOC_ARM_SBREL32:
20509 case BFD_RELOC_32_PCREL:
20510 #ifdef TE_PE
20511 case BFD_RELOC_32_SECREL:
20512 #endif
20513 if (fixP->fx_done || !seg->use_rela_p)
20514 #ifdef TE_WINCE
20515 /* For WinCE we only do this for pcrel fixups. */
20516 if (fixP->fx_done || fixP->fx_pcrel)
20517 #endif
20518 md_number_to_chars (buf, value, 4);
20519 break;
20520
20521 #ifdef OBJ_ELF
20522 case BFD_RELOC_ARM_PREL31:
20523 if (fixP->fx_done || !seg->use_rela_p)
20524 {
20525 newval = md_chars_to_number (buf, 4) & 0x80000000;
20526 if ((value ^ (value >> 1)) & 0x40000000)
20527 {
20528 as_bad_where (fixP->fx_file, fixP->fx_line,
20529 _("rel31 relocation overflow"));
20530 }
20531 newval |= value & 0x7fffffff;
20532 md_number_to_chars (buf, newval, 4);
20533 }
20534 break;
20535 #endif
20536
20537 case BFD_RELOC_ARM_CP_OFF_IMM:
20538 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20539 if (value < -1023 || value > 1023 || (value & 3))
20540 as_bad_where (fixP->fx_file, fixP->fx_line,
20541 _("co-processor offset out of range"));
20542 cp_off_common:
20543 sign = value >= 0;
20544 if (value < 0)
20545 value = -value;
20546 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20547 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20548 newval = md_chars_to_number (buf, INSN_SIZE);
20549 else
20550 newval = get_thumb32_insn (buf);
20551 newval &= 0xff7fff00;
20552 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
20553 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20554 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20555 md_number_to_chars (buf, newval, INSN_SIZE);
20556 else
20557 put_thumb32_insn (buf, newval);
20558 break;
20559
20560 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
20561 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
20562 if (value < -255 || value > 255)
20563 as_bad_where (fixP->fx_file, fixP->fx_line,
20564 _("co-processor offset out of range"));
20565 value *= 4;
20566 goto cp_off_common;
20567
20568 case BFD_RELOC_ARM_THUMB_OFFSET:
20569 newval = md_chars_to_number (buf, THUMB_SIZE);
20570 /* Exactly what ranges, and where the offset is inserted depends
20571 on the type of instruction, we can establish this from the
20572 top 4 bits. */
20573 switch (newval >> 12)
20574 {
20575 case 4: /* PC load. */
20576 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20577 forced to zero for these loads; md_pcrel_from has already
20578 compensated for this. */
20579 if (value & 3)
20580 as_bad_where (fixP->fx_file, fixP->fx_line,
20581 _("invalid offset, target not word aligned (0x%08lX)"),
20582 (((unsigned long) fixP->fx_frag->fr_address
20583 + (unsigned long) fixP->fx_where) & ~3)
20584 + (unsigned long) value);
20585
20586 if (value & ~0x3fc)
20587 as_bad_where (fixP->fx_file, fixP->fx_line,
20588 _("invalid offset, value too big (0x%08lX)"),
20589 (long) value);
20590
20591 newval |= value >> 2;
20592 break;
20593
20594 case 9: /* SP load/store. */
20595 if (value & ~0x3fc)
20596 as_bad_where (fixP->fx_file, fixP->fx_line,
20597 _("invalid offset, value too big (0x%08lX)"),
20598 (long) value);
20599 newval |= value >> 2;
20600 break;
20601
20602 case 6: /* Word load/store. */
20603 if (value & ~0x7c)
20604 as_bad_where (fixP->fx_file, fixP->fx_line,
20605 _("invalid offset, value too big (0x%08lX)"),
20606 (long) value);
20607 newval |= value << 4; /* 6 - 2. */
20608 break;
20609
20610 case 7: /* Byte load/store. */
20611 if (value & ~0x1f)
20612 as_bad_where (fixP->fx_file, fixP->fx_line,
20613 _("invalid offset, value too big (0x%08lX)"),
20614 (long) value);
20615 newval |= value << 6;
20616 break;
20617
20618 case 8: /* Halfword load/store. */
20619 if (value & ~0x3e)
20620 as_bad_where (fixP->fx_file, fixP->fx_line,
20621 _("invalid offset, value too big (0x%08lX)"),
20622 (long) value);
20623 newval |= value << 5; /* 6 - 1. */
20624 break;
20625
20626 default:
20627 as_bad_where (fixP->fx_file, fixP->fx_line,
20628 "Unable to process relocation for thumb opcode: %lx",
20629 (unsigned long) newval);
20630 break;
20631 }
20632 md_number_to_chars (buf, newval, THUMB_SIZE);
20633 break;
20634
20635 case BFD_RELOC_ARM_THUMB_ADD:
20636 /* This is a complicated relocation, since we use it for all of
20637 the following immediate relocations:
20638
20639 3bit ADD/SUB
20640 8bit ADD/SUB
20641 9bit ADD/SUB SP word-aligned
20642 10bit ADD PC/SP word-aligned
20643
20644 The type of instruction being processed is encoded in the
20645 instruction field:
20646
20647 0x8000 SUB
20648 0x00F0 Rd
20649 0x000F Rs
20650 */
20651 newval = md_chars_to_number (buf, THUMB_SIZE);
20652 {
20653 int rd = (newval >> 4) & 0xf;
20654 int rs = newval & 0xf;
20655 int subtract = !!(newval & 0x8000);
20656
20657 /* Check for HI regs, only very restricted cases allowed:
20658 Adjusting SP, and using PC or SP to get an address. */
20659 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
20660 || (rs > 7 && rs != REG_SP && rs != REG_PC))
20661 as_bad_where (fixP->fx_file, fixP->fx_line,
20662 _("invalid Hi register with immediate"));
20663
20664 /* If value is negative, choose the opposite instruction. */
20665 if (value < 0)
20666 {
20667 value = -value;
20668 subtract = !subtract;
20669 if (value < 0)
20670 as_bad_where (fixP->fx_file, fixP->fx_line,
20671 _("immediate value out of range"));
20672 }
20673
20674 if (rd == REG_SP)
20675 {
20676 if (value & ~0x1fc)
20677 as_bad_where (fixP->fx_file, fixP->fx_line,
20678 _("invalid immediate for stack address calculation"));
20679 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
20680 newval |= value >> 2;
20681 }
20682 else if (rs == REG_PC || rs == REG_SP)
20683 {
20684 if (subtract || value & ~0x3fc)
20685 as_bad_where (fixP->fx_file, fixP->fx_line,
20686 _("invalid immediate for address calculation (value = 0x%08lX)"),
20687 (unsigned long) value);
20688 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
20689 newval |= rd << 8;
20690 newval |= value >> 2;
20691 }
20692 else if (rs == rd)
20693 {
20694 if (value & ~0xff)
20695 as_bad_where (fixP->fx_file, fixP->fx_line,
20696 _("immediate value out of range"));
20697 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
20698 newval |= (rd << 8) | value;
20699 }
20700 else
20701 {
20702 if (value & ~0x7)
20703 as_bad_where (fixP->fx_file, fixP->fx_line,
20704 _("immediate value out of range"));
20705 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
20706 newval |= rd | (rs << 3) | (value << 6);
20707 }
20708 }
20709 md_number_to_chars (buf, newval, THUMB_SIZE);
20710 break;
20711
20712 case BFD_RELOC_ARM_THUMB_IMM:
20713 newval = md_chars_to_number (buf, THUMB_SIZE);
20714 if (value < 0 || value > 255)
20715 as_bad_where (fixP->fx_file, fixP->fx_line,
20716 _("invalid immediate: %ld is out of range"),
20717 (long) value);
20718 newval |= value;
20719 md_number_to_chars (buf, newval, THUMB_SIZE);
20720 break;
20721
20722 case BFD_RELOC_ARM_THUMB_SHIFT:
20723 /* 5bit shift value (0..32). LSL cannot take 32. */
20724 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
20725 temp = newval & 0xf800;
20726 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
20727 as_bad_where (fixP->fx_file, fixP->fx_line,
20728 _("invalid shift value: %ld"), (long) value);
20729 /* Shifts of zero must be encoded as LSL. */
20730 if (value == 0)
20731 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
20732 /* Shifts of 32 are encoded as zero. */
20733 else if (value == 32)
20734 value = 0;
20735 newval |= value << 6;
20736 md_number_to_chars (buf, newval, THUMB_SIZE);
20737 break;
20738
20739 case BFD_RELOC_VTABLE_INHERIT:
20740 case BFD_RELOC_VTABLE_ENTRY:
20741 fixP->fx_done = 0;
20742 return;
20743
20744 case BFD_RELOC_ARM_MOVW:
20745 case BFD_RELOC_ARM_MOVT:
20746 case BFD_RELOC_ARM_THUMB_MOVW:
20747 case BFD_RELOC_ARM_THUMB_MOVT:
20748 if (fixP->fx_done || !seg->use_rela_p)
20749 {
20750 /* REL format relocations are limited to a 16-bit addend. */
20751 if (!fixP->fx_done)
20752 {
20753 if (value < -0x8000 || value > 0x7fff)
20754 as_bad_where (fixP->fx_file, fixP->fx_line,
20755 _("offset out of range"));
20756 }
20757 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20758 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20759 {
20760 value >>= 16;
20761 }
20762
20763 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20764 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20765 {
20766 newval = get_thumb32_insn (buf);
20767 newval &= 0xfbf08f00;
20768 newval |= (value & 0xf000) << 4;
20769 newval |= (value & 0x0800) << 15;
20770 newval |= (value & 0x0700) << 4;
20771 newval |= (value & 0x00ff);
20772 put_thumb32_insn (buf, newval);
20773 }
20774 else
20775 {
20776 newval = md_chars_to_number (buf, 4);
20777 newval &= 0xfff0f000;
20778 newval |= value & 0x0fff;
20779 newval |= (value & 0xf000) << 4;
20780 md_number_to_chars (buf, newval, 4);
20781 }
20782 }
20783 return;
20784
20785 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20786 case BFD_RELOC_ARM_ALU_PC_G0:
20787 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20788 case BFD_RELOC_ARM_ALU_PC_G1:
20789 case BFD_RELOC_ARM_ALU_PC_G2:
20790 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20791 case BFD_RELOC_ARM_ALU_SB_G0:
20792 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20793 case BFD_RELOC_ARM_ALU_SB_G1:
20794 case BFD_RELOC_ARM_ALU_SB_G2:
20795 gas_assert (!fixP->fx_done);
20796 if (!seg->use_rela_p)
20797 {
20798 bfd_vma insn;
20799 bfd_vma encoded_addend;
20800 bfd_vma addend_abs = abs (value);
20801
20802 /* Check that the absolute value of the addend can be
20803 expressed as an 8-bit constant plus a rotation. */
20804 encoded_addend = encode_arm_immediate (addend_abs);
20805 if (encoded_addend == (unsigned int) FAIL)
20806 as_bad_where (fixP->fx_file, fixP->fx_line,
20807 _("the offset 0x%08lX is not representable"),
20808 (unsigned long) addend_abs);
20809
20810 /* Extract the instruction. */
20811 insn = md_chars_to_number (buf, INSN_SIZE);
20812
20813 /* If the addend is positive, use an ADD instruction.
20814 Otherwise use a SUB. Take care not to destroy the S bit. */
20815 insn &= 0xff1fffff;
20816 if (value < 0)
20817 insn |= 1 << 22;
20818 else
20819 insn |= 1 << 23;
20820
20821 /* Place the encoded addend into the first 12 bits of the
20822 instruction. */
20823 insn &= 0xfffff000;
20824 insn |= encoded_addend;
20825
20826 /* Update the instruction. */
20827 md_number_to_chars (buf, insn, INSN_SIZE);
20828 }
20829 break;
20830
20831 case BFD_RELOC_ARM_LDR_PC_G0:
20832 case BFD_RELOC_ARM_LDR_PC_G1:
20833 case BFD_RELOC_ARM_LDR_PC_G2:
20834 case BFD_RELOC_ARM_LDR_SB_G0:
20835 case BFD_RELOC_ARM_LDR_SB_G1:
20836 case BFD_RELOC_ARM_LDR_SB_G2:
20837 gas_assert (!fixP->fx_done);
20838 if (!seg->use_rela_p)
20839 {
20840 bfd_vma insn;
20841 bfd_vma addend_abs = abs (value);
20842
20843 /* Check that the absolute value of the addend can be
20844 encoded in 12 bits. */
20845 if (addend_abs >= 0x1000)
20846 as_bad_where (fixP->fx_file, fixP->fx_line,
20847 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20848 (unsigned long) addend_abs);
20849
20850 /* Extract the instruction. */
20851 insn = md_chars_to_number (buf, INSN_SIZE);
20852
20853 /* If the addend is negative, clear bit 23 of the instruction.
20854 Otherwise set it. */
20855 if (value < 0)
20856 insn &= ~(1 << 23);
20857 else
20858 insn |= 1 << 23;
20859
20860 /* Place the absolute value of the addend into the first 12 bits
20861 of the instruction. */
20862 insn &= 0xfffff000;
20863 insn |= addend_abs;
20864
20865 /* Update the instruction. */
20866 md_number_to_chars (buf, insn, INSN_SIZE);
20867 }
20868 break;
20869
20870 case BFD_RELOC_ARM_LDRS_PC_G0:
20871 case BFD_RELOC_ARM_LDRS_PC_G1:
20872 case BFD_RELOC_ARM_LDRS_PC_G2:
20873 case BFD_RELOC_ARM_LDRS_SB_G0:
20874 case BFD_RELOC_ARM_LDRS_SB_G1:
20875 case BFD_RELOC_ARM_LDRS_SB_G2:
20876 gas_assert (!fixP->fx_done);
20877 if (!seg->use_rela_p)
20878 {
20879 bfd_vma insn;
20880 bfd_vma addend_abs = abs (value);
20881
20882 /* Check that the absolute value of the addend can be
20883 encoded in 8 bits. */
20884 if (addend_abs >= 0x100)
20885 as_bad_where (fixP->fx_file, fixP->fx_line,
20886 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20887 (unsigned long) addend_abs);
20888
20889 /* Extract the instruction. */
20890 insn = md_chars_to_number (buf, INSN_SIZE);
20891
20892 /* If the addend is negative, clear bit 23 of the instruction.
20893 Otherwise set it. */
20894 if (value < 0)
20895 insn &= ~(1 << 23);
20896 else
20897 insn |= 1 << 23;
20898
20899 /* Place the first four bits of the absolute value of the addend
20900 into the first 4 bits of the instruction, and the remaining
20901 four into bits 8 .. 11. */
20902 insn &= 0xfffff0f0;
20903 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
20904
20905 /* Update the instruction. */
20906 md_number_to_chars (buf, insn, INSN_SIZE);
20907 }
20908 break;
20909
20910 case BFD_RELOC_ARM_LDC_PC_G0:
20911 case BFD_RELOC_ARM_LDC_PC_G1:
20912 case BFD_RELOC_ARM_LDC_PC_G2:
20913 case BFD_RELOC_ARM_LDC_SB_G0:
20914 case BFD_RELOC_ARM_LDC_SB_G1:
20915 case BFD_RELOC_ARM_LDC_SB_G2:
20916 gas_assert (!fixP->fx_done);
20917 if (!seg->use_rela_p)
20918 {
20919 bfd_vma insn;
20920 bfd_vma addend_abs = abs (value);
20921
20922 /* Check that the absolute value of the addend is a multiple of
20923 four and, when divided by four, fits in 8 bits. */
20924 if (addend_abs & 0x3)
20925 as_bad_where (fixP->fx_file, fixP->fx_line,
20926 _("bad offset 0x%08lX (must be word-aligned)"),
20927 (unsigned long) addend_abs);
20928
20929 if ((addend_abs >> 2) > 0xff)
20930 as_bad_where (fixP->fx_file, fixP->fx_line,
20931 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20932 (unsigned long) addend_abs);
20933
20934 /* Extract the instruction. */
20935 insn = md_chars_to_number (buf, INSN_SIZE);
20936
20937 /* If the addend is negative, clear bit 23 of the instruction.
20938 Otherwise set it. */
20939 if (value < 0)
20940 insn &= ~(1 << 23);
20941 else
20942 insn |= 1 << 23;
20943
20944 /* Place the addend (divided by four) into the first eight
20945 bits of the instruction. */
20946 insn &= 0xfffffff0;
20947 insn |= addend_abs >> 2;
20948
20949 /* Update the instruction. */
20950 md_number_to_chars (buf, insn, INSN_SIZE);
20951 }
20952 break;
20953
20954 case BFD_RELOC_ARM_V4BX:
20955 /* This will need to go in the object file. */
20956 fixP->fx_done = 0;
20957 break;
20958
20959 case BFD_RELOC_UNUSED:
20960 default:
20961 as_bad_where (fixP->fx_file, fixP->fx_line,
20962 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
20963 }
20964 }
20965
20966 /* Translate internal representation of relocation info to BFD target
20967 format. */
20968
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA targets the addend must be made relative to the fix
	 location; for REL targets the reloc field itself carries the
	 offset, so record the address instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Translate the internal fixup type into a BFD relocation code.  The
     data relocations below deliberately fall through: a non-PC-relative
     BFD_RELOC_8/16/32/MOVW/MOVT drops into the catch-all list that ends
     with "code = fixp->fx_r_type".  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These internal types map directly onto identically-named BFD
       relocation codes.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= 4 guarantees BLX; emit a plain BRANCH23 and let the
	 linker relax if necessary.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only fixup: should have been resolved in md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      /* ADRL expands to a pair of instructions; it cannot be expressed
	 as an object-file relocation.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any other internal fixup type cannot be represented; report
	   it by name where we know one.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
21218
21219 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21220
21221 void
21222 cons_fix_new_arm (fragS * frag,
21223 int where,
21224 int size,
21225 expressionS * exp)
21226 {
21227 bfd_reloc_code_real_type type;
21228 int pcrel = 0;
21229
21230 /* Pick a reloc.
21231 FIXME: @@ Should look at CPU word size. */
21232 switch (size)
21233 {
21234 case 1:
21235 type = BFD_RELOC_8;
21236 break;
21237 case 2:
21238 type = BFD_RELOC_16;
21239 break;
21240 case 4:
21241 default:
21242 type = BFD_RELOC_32;
21243 break;
21244 case 8:
21245 type = BFD_RELOC_64;
21246 break;
21247 }
21248
21249 #ifdef TE_PE
21250 if (exp->X_op == O_secrel)
21251 {
21252 exp->X_op = O_symbol;
21253 type = BFD_RELOC_32_SECREL;
21254 }
21255 #endif
21256
21257 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
21258 }
21259
21260 #if defined (OBJ_COFF)
21261 void
21262 arm_validate_fix (fixS * fixP)
21263 {
21264 /* If the destination of the branch is a defined symbol which does not have
21265 the THUMB_FUNC attribute, then we must be calling a function which has
21266 the (interfacearm) attribute. We look for the Thumb entry point to that
21267 function and change the branch to refer to that function instead. */
21268 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
21269 && fixP->fx_addsy != NULL
21270 && S_IS_DEFINED (fixP->fx_addsy)
21271 && ! THUMB_IS_FUNC (fixP->fx_addsy))
21272 {
21273 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
21274 }
21275 }
21276 #endif
21277
21278
int
arm_force_relocation (struct fix * fixp)
{
  /* Decide whether a fixup must be emitted as a relocation even if it
     could be resolved at assembly time.  Returns non-zero to force the
     relocation.  */
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible. */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    /* ARM-mode branch or Thumb BLX: force when the target is a Thumb
       function so the linker can handle the interworking.
       NOTE(review): fx_addsy is dereferenced by THUMB_IS_FUNC/ARM_IS_FUNC
       without a NULL check here -- presumably these branch fixups always
       carry a symbol; confirm against callers.  */
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    /* Thumb-mode branch or ARM BLX: force when the target is an ARM
       function, for the same reason.  */
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     These are internal-only fixup types that never reach the object
     file.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.
     The range test relies on the group relocations being contiguous in
     the bfd_reloc_code_real enumeration.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
21339
21340 #if defined (OBJ_ELF) || defined (OBJ_COFF)
21341 /* Relocations against function names must be left unadjusted,
21342 so that the linker can use this information to generate interworking
21343 stubs. The MIPS version of this function
21344 also prevents relocations that are mips-16 specific, but I do not
21345 know why it does this.
21346
21347 FIXME:
21348 There is one other problem that ought to be addressed here, but
21349 which currently is not: Taking the address of a label (rather
21350 than a function) and then later jumping to that address. Such
21351 addresses also ought to have their bottom bit set (assuming that
21352 they reside in Thumb code), but at the moment they will not. */
21353
21354 bfd_boolean
21355 arm_fix_adjustable (fixS * fixP)
21356 {
21357 if (fixP->fx_addsy == NULL)
21358 return 1;
21359
21360 /* Preserve relocations against symbols with function type. */
21361 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
21362 return FALSE;
21363
21364 if (THUMB_IS_FUNC (fixP->fx_addsy)
21365 && fixP->fx_subsy == NULL)
21366 return FALSE;
21367
21368 /* We need the symbol name for the VTABLE entries. */
21369 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
21370 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21371 return FALSE;
21372
21373 /* Don't allow symbols to be discarded on GOT related relocs. */
21374 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
21375 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
21376 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
21377 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
21378 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
21379 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
21380 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
21381 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
21382 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
21383 return FALSE;
21384
21385 /* Similarly for group relocations. */
21386 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21387 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21388 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21389 return FALSE;
21390
21391 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21392 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
21393 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21394 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
21395 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
21396 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21397 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
21398 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
21399 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
21400 return FALSE;
21401
21402 return TRUE;
21403 }
21404 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21405
21406 #ifdef OBJ_ELF
21407
21408 const char *
21409 elf32_arm_target_format (void)
21410 {
21411 #ifdef TE_SYMBIAN
21412 return (target_big_endian
21413 ? "elf32-bigarm-symbian"
21414 : "elf32-littlearm-symbian");
21415 #elif defined (TE_VXWORKS)
21416 return (target_big_endian
21417 ? "elf32-bigarm-vxworks"
21418 : "elf32-littlearm-vxworks");
21419 #else
21420 if (target_big_endian)
21421 return "elf32-bigarm";
21422 else
21423 return "elf32-littlearm";
21424 #endif
21425 }
21426
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  /* Per-symbol hook called while writing out the symbol table; simply
     defer to the generic ELF implementation.  PUNTP semantics come from
     elf_frob_symbol, not this wrapper -- presumably it flags symbols to
     be dropped; confirm against obj-elf.  */
  elf_frob_symbol (symp, puntp);
}
21433 #endif
21434
21435 /* MD interface: Finalization. */
21436
21437 void
21438 arm_cleanup (void)
21439 {
21440 literal_pool * pool;
21441
21442 /* Ensure that all the IT blocks are properly closed. */
21443 check_it_blocks_finished ();
21444
21445 for (pool = list_of_pools; pool; pool = pool->next)
21446 {
21447 /* Put it at the end of the relevant section. */
21448 subseg_set (pool->section, pool->sub_section);
21449 #ifdef OBJ_ELF
21450 arm_elf_change_section ();
21451 #endif
21452 s_ltorg (0);
21453 }
21454 }
21455
21456 #ifdef OBJ_ELF
21457 /* Remove any excess mapping symbols generated for alignment frags in
21458 SEC. We may have created a mapping symbol before a zero byte
21459 alignment; remove it if there's a mapping symbol after the
21460 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections without a frag chain have no mapping symbols to prune.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* The last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag; scan
	 forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
21521 #endif
21522
21523 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21524 ARM ones. */
21525
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite the storage class of every Thumb symbol to its
     C_THUMB* counterpart so downstream tools can tell the two
     instruction sets apart.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get their native COFF flags byte forced on.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  /* ELF: tag Thumb symbols via st_info instead, skipping the special
     $a/$t/$d mapping symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
21602
21603 /* MD interface: Initialization. */
21604
21605 static void
21606 set_constant_flonums (void)
21607 {
21608 int i;
21609
21610 for (i = 0; i < NUM_FLOAT_VALS; i++)
21611 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
21612 abort ();
21613 }
21614
21615 /* Auto-select Thumb mode if it's the only available instruction set for the
21616 given architecture. */
21617
21618 static void
21619 autoselect_thumb_from_cpu_variant (void)
21620 {
21621 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
21622 opcode_select (16);
21623 }
21624
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate the lookup tables used while assembling: mnemonics,
     condition codes, shift names, PSR names, register names,
     relocation names and barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from its static descriptor array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU selection when nothing above chose one.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  /* cpu_variant = CPU features plus the selected FPU's features.  */
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  /* Reset the per-file feature-usage trackers; they accumulate as
     instructions are assembled.  */
  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific feature first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
21841
21842 /* Command line processing. */
21843
21844 /* md_parse_option
21845 Invocation line includes a switch not recognized by the base assembler.
21846 See if it's a processor-specific option.
21847
21848 This routine is somewhat complicated by the need for backwards
21849 compatibility (since older releases of gcc can't be changed).
21850 The new options try to make the interface as compatible as
21851 possible with GCC.
21852
21853 New options (supported) are:
21854
21855 -mcpu=<cpu name> Assemble for selected processor
21856 -march=<architecture name> Assemble for selected architecture
21857 -mfpu=<fpu architecture> Assemble for selected FPU.
21858 -EB/-mbig-endian Big-endian
21859 -EL/-mlittle-endian Little-endian
21860 -k Generate PIC code
21861 -mthumb Start in Thumb mode
21862 -mthumb-interwork Code supports ARM/Thumb interworking
21863
21864 -m[no-]warn-deprecated Warn about deprecated features
21865
21866 For now we will also provide support for:
21867
21868 -mapcs-32 32-bit Program counter
21869 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
21871 -mapcs-reentrant Reentrant code
21872 -matpcs
21873 (sometime these will probably be replaced with -mapcs=<list of options>
21874 and -matpcs=<list of options>)
21875
   The remaining options are only supported for backwards compatibility.
21877 Cpu variants, the arm part is optional:
21878 -m[arm]1 Currently not supported.
21879 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
21880 -m[arm]3 Arm 3 processor
21881 -m[arm]6[xx], Arm 6 processors
21882 -m[arm]7[xx][t][[d]m] Arm 7 processors
21883 -m[arm]8[10] Arm 8 processors
21884 -m[arm]9[20][tdmi] Arm 9 processors
21885 -mstrongarm[110[0]] StrongARM processors
21886 -mxscale XScale processors
21887 -m[arm]v[2345[t[e]]] Arm architectures
21888 -mall All (except the ARM1)
21889 FP variants:
21890 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
21891 -mfpe-old (No float load/store multiples)
21892 -mvfpxd VFP Single precision
21893 -mvfp All VFP
21894 -mno-fpu Disable all floating point instructions
21895
21896 The following CPU names are recognized:
21897 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
21898 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
21899 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
21900 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
21901 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
21902 arm10t arm10e, arm1020t, arm1020e, arm10200e,
21903 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
21904
21905 */
21906
const char * md_shortopts = "m:k";	/* -m<arg> and -k.  */

/* Only define the byte-order options the target can actually produce;
   bi-endian targets get both.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
21920
/* Long options dispatched through the OPTION_* codes above.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
21934
/* Table entry for a simple flag-style command-line option: matching
   OPTION sets *VAR to VALUE.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
21943
/* Simple on/off options; each sets the named variable to the given
   value when matched.  */
struct arm_option_table arm_opts[] =
{
  {"k", N_("generate PIC code"), &pic_code, 1, NULL},
  {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
21969
/* Table entry for a legacy option: matching OPTION stores VALUE's
   feature set through VAR (legacy_cpu or legacy_fpu) and prints the
   deprecation hint.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
21977
/* Old-style -m<cpu>/-m<arch>/FP switches mapped onto legacy_cpu or
   legacy_fpu; each carries the modern -mcpu=/-march=/-mfpu= spelling
   to suggest instead.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
22090
/* Table entry describing one recognized CPU name.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name accepted on the command line.  */
  const arm_feature_set value;	/* Feature set this CPU implies.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
22102
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Fields: name, implied features, default FPU,
   canonical name (NULL => uppercase NAME).  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
  {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
  {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
  {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
  {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
  {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
  {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
  {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
  {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
  {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
  {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
  {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
  {"cortex-a5", ARM_ARCH_V7A, FPU_NONE, NULL},
  {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
					   | FPU_NEON_EXT_V1),
   NULL},
  {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
					   | FPU_NEON_EXT_V1),
   NULL},
  {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
  {"cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16, NULL},
  {"cortex-m4", ARM_ARCH_V7EM, FPU_NONE, NULL},
  {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
  {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
  {"cortex-m0", ARM_ARCH_V6M, FPU_NONE, NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
};
22211
/* Table entry describing one recognized architecture name.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name accepted on the command line.  */
  const arm_feature_set value;	/* Feature set this architecture implies.  */
  const arm_feature_set default_fpu;  /* FPU assumed if none is given.  */
};
22218
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA},
  {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
  {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
  {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
  {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
  {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
  {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
  {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
  {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
  /* From v5 on the default FPU switches from FPA to VFP.  */
  {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
  {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
  {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
  {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
  {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
  {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
  {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
  {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
  {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
  {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
  {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
  {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
  {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP},
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
};
22265
/* ISA extensions in the co-processor space.  */
/* Simple name/feature-set pair; shared by the extension and FPU
   tables below.  */
struct arm_option_cpu_value_table
{
  char *name;			/* Name accepted on the command line.  */
  const arm_feature_set value;	/* Feature set it selects.  */
};
22272
/* Recognized co-processor extension names.  NOTE(review): presumably
   consumed by arm_parse_extension below -- confirm against its callers.  */
static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL, ARM_ARCH_NONE}
};
22281
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"vfpv2", FPU_ARCH_VFP_V2},
  {"vfpv3", FPU_ARCH_VFP_V3},
  {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd", FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2},
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16", FPU_ARCH_NEON_FP16},
  {"vfpv4", FPU_ARCH_VFP_V4},
  {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
  {NULL, ARM_ARCH_NONE}
};
22322
/* A simple name -> integer-value mapping, used for the float-ABI and
   EABI-version option tables below.  */
struct arm_option_value_table
{
  char *name;	/* User-visible option name.  */
  long value;	/* Value recorded when the name is selected.  */
};
22328
/* Accepted values for -mfloat-abi=.  NULL-name terminated.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
22336
22337 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Accepted values for -meabi=; NULL-name terminated.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
22346 #endif
22347
/* A long-form command line option (e.g. -mcpu=...).  md_parse_option
   matches OPTION as a prefix of the argument and hands the remainder
   to FUNC.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
22355
22356 static bfd_boolean
22357 arm_parse_extension (char * str, const arm_feature_set **opt_p)
22358 {
22359 arm_feature_set *ext_set = (arm_feature_set *)
22360 xmalloc (sizeof (arm_feature_set));
22361
22362 /* Copy the feature set, so that we can modify it. */
22363 *ext_set = **opt_p;
22364 *opt_p = ext_set;
22365
22366 while (str != NULL && *str != 0)
22367 {
22368 const struct arm_option_cpu_value_table * opt;
22369 char * ext;
22370 int optlen;
22371
22372 if (*str != '+')
22373 {
22374 as_bad (_("invalid architectural extension"));
22375 return FALSE;
22376 }
22377
22378 str++;
22379 ext = strchr (str, '+');
22380
22381 if (ext != NULL)
22382 optlen = ext - str;
22383 else
22384 optlen = strlen (str);
22385
22386 if (optlen == 0)
22387 {
22388 as_bad (_("missing architectural extension"));
22389 return FALSE;
22390 }
22391
22392 for (opt = arm_extensions; opt->name != NULL; opt++)
22393 if (strncmp (opt->name, str, optlen) == 0)
22394 {
22395 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
22396 break;
22397 }
22398
22399 if (opt->name == NULL)
22400 {
22401 as_bad (_("unknown architectural extension `%s'"), str);
22402 return FALSE;
22403 }
22404
22405 str = ext;
22406 };
22407
22408 return TRUE;
22409 }
22410
22411 static bfd_boolean
22412 arm_parse_cpu (char * str)
22413 {
22414 const struct arm_cpu_option_table * opt;
22415 char * ext = strchr (str, '+');
22416 int optlen;
22417
22418 if (ext != NULL)
22419 optlen = ext - str;
22420 else
22421 optlen = strlen (str);
22422
22423 if (optlen == 0)
22424 {
22425 as_bad (_("missing cpu name `%s'"), str);
22426 return FALSE;
22427 }
22428
22429 for (opt = arm_cpus; opt->name != NULL; opt++)
22430 if (strncmp (opt->name, str, optlen) == 0)
22431 {
22432 mcpu_cpu_opt = &opt->value;
22433 mcpu_fpu_opt = &opt->default_fpu;
22434 if (opt->canonical_name)
22435 strcpy (selected_cpu_name, opt->canonical_name);
22436 else
22437 {
22438 int i;
22439
22440 for (i = 0; i < optlen; i++)
22441 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22442 selected_cpu_name[i] = 0;
22443 }
22444
22445 if (ext != NULL)
22446 return arm_parse_extension (ext, &mcpu_cpu_opt);
22447
22448 return TRUE;
22449 }
22450
22451 as_bad (_("unknown cpu `%s'"), str);
22452 return FALSE;
22453 }
22454
22455 static bfd_boolean
22456 arm_parse_arch (char * str)
22457 {
22458 const struct arm_arch_option_table *opt;
22459 char *ext = strchr (str, '+');
22460 int optlen;
22461
22462 if (ext != NULL)
22463 optlen = ext - str;
22464 else
22465 optlen = strlen (str);
22466
22467 if (optlen == 0)
22468 {
22469 as_bad (_("missing architecture name `%s'"), str);
22470 return FALSE;
22471 }
22472
22473 for (opt = arm_archs; opt->name != NULL; opt++)
22474 if (streq (opt->name, str))
22475 {
22476 march_cpu_opt = &opt->value;
22477 march_fpu_opt = &opt->default_fpu;
22478 strcpy (selected_cpu_name, opt->name);
22479
22480 if (ext != NULL)
22481 return arm_parse_extension (ext, &march_cpu_opt);
22482
22483 return TRUE;
22484 }
22485
22486 as_bad (_("unknown architecture `%s'\n"), str);
22487 return FALSE;
22488 }
22489
22490 static bfd_boolean
22491 arm_parse_fpu (char * str)
22492 {
22493 const struct arm_option_cpu_value_table * opt;
22494
22495 for (opt = arm_fpus; opt->name != NULL; opt++)
22496 if (streq (opt->name, str))
22497 {
22498 mfpu_opt = &opt->value;
22499 return TRUE;
22500 }
22501
22502 as_bad (_("unknown floating point format `%s'\n"), str);
22503 return FALSE;
22504 }
22505
22506 static bfd_boolean
22507 arm_parse_float_abi (char * str)
22508 {
22509 const struct arm_option_value_table * opt;
22510
22511 for (opt = arm_float_abis; opt->name != NULL; opt++)
22512 if (streq (opt->name, str))
22513 {
22514 mfloat_abi_opt = opt->value;
22515 return TRUE;
22516 }
22517
22518 as_bad (_("unknown floating point abi `%s'\n"), str);
22519 return FALSE;
22520 }
22521
22522 #ifdef OBJ_ELF
22523 static bfd_boolean
22524 arm_parse_eabi (char * str)
22525 {
22526 const struct arm_option_value_table *opt;
22527
22528 for (opt = arm_eabis; opt->name != NULL; opt++)
22529 if (streq (opt->name, str))
22530 {
22531 meabi_flags = opt->value;
22532 return TRUE;
22533 }
22534 as_bad (_("unknown EABI `%s'\n"), str);
22535 return FALSE;
22536 }
22537 #endif
22538
22539 static bfd_boolean
22540 arm_parse_it_mode (char * str)
22541 {
22542 bfd_boolean ret = TRUE;
22543
22544 if (streq ("arm", str))
22545 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
22546 else if (streq ("thumb", str))
22547 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
22548 else if (streq ("always", str))
22549 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
22550 else if (streq ("never", str))
22551 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
22552 else
22553 {
22554 as_bad (_("unknown implicit IT mode `%s', should be "\
22555 "arm, thumb, always, or never."), str);
22556 ret = FALSE;
22557 }
22558
22559 return ret;
22560 }
22561
22562 struct arm_long_option_table arm_long_opts[] =
22563 {
22564 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
22565 arm_parse_cpu, NULL},
22566 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
22567 arm_parse_arch, NULL},
22568 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
22569 arm_parse_fpu, NULL},
22570 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
22571 arm_parse_float_abi, NULL},
22572 #ifdef OBJ_ELF
22573 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
22574 arm_parse_eabi, NULL},
22575 #endif
22576 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
22577 arm_parse_it_mode, NULL},
22578 {NULL, NULL, 0, NULL}
22579 };
22580
/* Handle command line option C with (possibly NULL) argument ARG.
   Returns 1 if the option was consumed, 0 if unrecognized so the
   generic getopt machinery can report it.  Unlisted options fall
   through three tables in order: arm_opts (flag options),
   arm_legacy_opts (deprecated spellings; note these store a POINTER
   to the table value), then arm_long_opts (prefix-matched options
   taking a sub-argument, e.g. -mcpu=).  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Flag options: match option character plus (optional) exact
	 suffix, e.g. -mthumb.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: same matching rule, but the variable stores
	 the ADDRESS of the table's feature-set value.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
22671
/* Print a description of the ARM-specific assembler options to FP,
   for --help: first the short-option table, then the long-option
   table, then the options that are not table-driven.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Only entries with help text are user-visible.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
22701
22702
22703 #ifdef OBJ_ELF
/* One row of the feature-bits -> EABI Tag_CPU_arch mapping.  */
typedef struct
{
  int val;			/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Feature bits implied by that arch.  */
} cpu_arch_ver_table;
22709
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  The scan in aeabi_set_public_attributes keeps the
   LAST entry that still contributes a new feature bit, clearing bits as it
   goes.  NOTE(review): v6-M (11) appearing before v6T2 (8) looks deliberate
   given that scan order, but confirm against the attribute tests before
   reordering.  Terminated by a zero-valued entry.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {9, ARM_ARCH_V6K},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
22730
22731 /* Set an attribute if it has not already been set by the user. */
22732 static void
22733 aeabi_set_attribute_int (int tag, int value)
22734 {
22735 if (tag < 1
22736 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22737 || !attributes_set_explicitly[tag])
22738 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
22739 }
22740
22741 static void
22742 aeabi_set_attribute_string (int tag, const char *value)
22743 {
22744 if (tag < 1
22745 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22746 || !attributes_set_explicitly[tag])
22747 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
22748 }
22749
/* Set the public EABI object attributes (Tag_CPU_arch, Tag_VFP_arch,
   etc.) from the union of the features actually used, the selected
   CPU, and the selected FPU.  User-set attributes are never
   overridden (see aeabi_set_attribute_*).  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Scan cpu_arch_ver clearing each entry's bits; the last entry that
     still contributed a new feature determines the arch number.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Report "armvN..." names as upper-cased "VN...".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
				? 2 : 1));

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}
22866
22867 /* Add the default contents for the .ARM.attributes section. */
22868 void
22869 arm_md_end (void)
22870 {
22871 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22872 return;
22873
22874 aeabi_set_public_attributes ();
22875 }
22876 #endif /* OBJ_ELF */
22877
22878
22879 /* Parse a .cpu directive. */
22880
22881 static void
22882 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
22883 {
22884 const struct arm_cpu_option_table *opt;
22885 char *name;
22886 char saved_char;
22887
22888 name = input_line_pointer;
22889 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22890 input_line_pointer++;
22891 saved_char = *input_line_pointer;
22892 *input_line_pointer = 0;
22893
22894 /* Skip the first "all" entry. */
22895 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
22896 if (streq (opt->name, name))
22897 {
22898 mcpu_cpu_opt = &opt->value;
22899 selected_cpu = opt->value;
22900 if (opt->canonical_name)
22901 strcpy (selected_cpu_name, opt->canonical_name);
22902 else
22903 {
22904 int i;
22905 for (i = 0; opt->name[i]; i++)
22906 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22907 selected_cpu_name[i] = 0;
22908 }
22909 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22910 *input_line_pointer = saved_char;
22911 demand_empty_rest_of_line ();
22912 return;
22913 }
22914 as_bad (_("unknown cpu `%s'"), name);
22915 *input_line_pointer = saved_char;
22916 ignore_rest_of_line ();
22917 }
22918
22919
22920 /* Parse a .arch directive. */
22921
22922 static void
22923 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
22924 {
22925 const struct arm_arch_option_table *opt;
22926 char saved_char;
22927 char *name;
22928
22929 name = input_line_pointer;
22930 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22931 input_line_pointer++;
22932 saved_char = *input_line_pointer;
22933 *input_line_pointer = 0;
22934
22935 /* Skip the first "all" entry. */
22936 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22937 if (streq (opt->name, name))
22938 {
22939 mcpu_cpu_opt = &opt->value;
22940 selected_cpu = opt->value;
22941 strcpy (selected_cpu_name, opt->name);
22942 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22943 *input_line_pointer = saved_char;
22944 demand_empty_rest_of_line ();
22945 return;
22946 }
22947
22948 as_bad (_("unknown architecture `%s'\n"), name);
22949 *input_line_pointer = saved_char;
22950 ignore_rest_of_line ();
22951 }
22952
22953
22954 /* Parse a .object_arch directive. */
22955
22956 static void
22957 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
22958 {
22959 const struct arm_arch_option_table *opt;
22960 char saved_char;
22961 char *name;
22962
22963 name = input_line_pointer;
22964 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22965 input_line_pointer++;
22966 saved_char = *input_line_pointer;
22967 *input_line_pointer = 0;
22968
22969 /* Skip the first "all" entry. */
22970 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22971 if (streq (opt->name, name))
22972 {
22973 object_arch = &opt->value;
22974 *input_line_pointer = saved_char;
22975 demand_empty_rest_of_line ();
22976 return;
22977 }
22978
22979 as_bad (_("unknown architecture `%s'\n"), name);
22980 *input_line_pointer = saved_char;
22981 ignore_rest_of_line ();
22982 }
22983
22984 /* Parse a .fpu directive. */
22985
22986 static void
22987 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
22988 {
22989 const struct arm_option_cpu_value_table *opt;
22990 char saved_char;
22991 char *name;
22992
22993 name = input_line_pointer;
22994 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22995 input_line_pointer++;
22996 saved_char = *input_line_pointer;
22997 *input_line_pointer = 0;
22998
22999 for (opt = arm_fpus; opt->name != NULL; opt++)
23000 if (streq (opt->name, name))
23001 {
23002 mfpu_opt = &opt->value;
23003 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23004 *input_line_pointer = saved_char;
23005 demand_empty_rest_of_line ();
23006 return;
23007 }
23008
23009 as_bad (_("unknown floating point format `%s'\n"), name);
23010 *input_line_pointer = saved_char;
23011 ignore_rest_of_line ();
23012 }
23013
/* Copy symbol information.  Propagates the ARM/Thumb flag bits from
   SRC to DEST (ARM_GET_FLAG expands to an lvalue on the symbol's
   target-specific data).  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
23021
23022 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used by the
   .eabi_attribute directive so users can write tag names instead of
   numbers.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search is fine: the table is small and this only runs
     while parsing directives.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
23092
23093
23094 /* Apply sym value for relocations only in the case that
23095 they are for local symbols and you have the respective
23096 architectural feature for blx and simple switches. */
23097 int
23098 arm_apply_sym_value (struct fix * fixP)
23099 {
23100 if (fixP->fx_addsy
23101 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23102 && !S_IS_EXTERNAL (fixP->fx_addsy))
23103 {
23104 switch (fixP->fx_r_type)
23105 {
23106 case BFD_RELOC_ARM_PCREL_BLX:
23107 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23108 if (ARM_IS_FUNC (fixP->fx_addsy))
23109 return 1;
23110 break;
23111
23112 case BFD_RELOC_ARM_PCREL_CALL:
23113 case BFD_RELOC_THUMB_PCREL_BLX:
23114 if (THUMB_IS_FUNC (fixP->fx_addsy))
23115 return 1;
23116 break;
23117
23118 default:
23119 break;
23120 }
23121
23122 }
23123 return 0;
23124 }
23125 #endif /* OBJ_ELF */