1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
#define WARN_DEPRECATED 1

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
51 /* This structure holds the unwinding state. */
56 symbolS
* table_entry
;
57 symbolS
* personality_routine
;
58 int personality_index
;
59 /* The segment containing the function. */
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes
;
66 /* The number of bytes pushed to the stack. */
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset
;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
76 /* Nonzero if an unwind_setfp directive has been seen. */
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored
:1;
82 /* Bit N indicates that an R_ARM_NONE relocation has been output for
83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency
= 0;
89 /* Results from operand parsing worker functions. */
93 PARSE_OPERAND_SUCCESS
,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result
;
101 ARM_FLOAT_ABI_SOFTFP
,
105 /* Types of processor to assemble for. */
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
132 #endif /* ifndef FPU_DEFAULT */
134 #define streq(a, b) (strcmp (a, b) == 0)
136 static arm_feature_set cpu_variant
;
137 static arm_feature_set arm_arch_used
;
138 static arm_feature_set thumb_arch_used
;
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26
= FALSE
;
142 static int atpcs
= FALSE
;
143 static int support_interwork
= FALSE
;
144 static int uses_apcs_float
= FALSE
;
145 static int pic_code
= FALSE
;
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
150 static const arm_feature_set
*legacy_cpu
= NULL
;
151 static const arm_feature_set
*legacy_fpu
= NULL
;
153 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
154 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
155 static const arm_feature_set
*march_cpu_opt
= NULL
;
156 static const arm_feature_set
*march_fpu_opt
= NULL
;
157 static const arm_feature_set
*mfpu_opt
= NULL
;
158 static const arm_feature_set
*object_arch
= NULL
;
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
162 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
163 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
164 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
165 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
166 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
167 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
168 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
169 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
172 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
175 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
176 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
177 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
178 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
179 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
180 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
181 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
182 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
183 static const arm_feature_set arm_ext_v4t_5
=
184 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
185 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
186 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
187 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
188 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
189 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
190 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
191 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
192 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
193 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
194 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
195 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
196 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
197 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
198 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
200 static const arm_feature_set arm_arch_any
= ARM_ANY
;
201 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
203 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
205 static const arm_feature_set arm_cext_iwmmxt2
=
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2
);
207 static const arm_feature_set arm_cext_iwmmxt
=
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
209 static const arm_feature_set arm_cext_xscale
=
210 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
211 static const arm_feature_set arm_cext_maverick
=
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
213 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
214 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
215 static const arm_feature_set fpu_vfp_ext_v1xd
=
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
217 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
218 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
219 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
220 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
222 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
224 static int mfloat_abi_opt
= -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name
[16];
231 static int meabi_flags
= EABI_DEFAULT
;
233 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
238 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
239 symbolS
* GOT_symbol
;
242 /* 0: assemble for ARM,
243 1: assemble for Thumb,
244 2: assemble for Thumb even though target CPU does not support thumb
246 static int thumb_mode
= 0;
248 /* If unified_syntax is true, we are processing the new unified
249 ARM/Thumb syntax. Important differences from the old ARM mode:
251 - Immediate operands do not require a # prefix.
252 - Conditional affixes always appear at the end of the
253 instruction. (For backward compatibility, those instructions
254 that formerly had them in the middle, continue to accept them
256 - The IT instruction may appear, and if it does is validated
257 against subsequent conditional affixes. It does not generate
260 Important differences from the old Thumb mode:
262 - Immediate operands do not require a # prefix.
263 - Most of the V6T2 instructions are only available in unified mode.
264 - The .N and .W suffixes are recognized and honored (it is an error
265 if they cannot be honored).
266 - All instructions set the flags if and only if they have an 's' affix.
267 - Conditional affixes may be used. They are validated against
268 preceding IT instructions. Unlike ARM mode, you cannot use a
269 conditional affix except in the scope of an IT instruction. */
271 static bfd_boolean unified_syntax
= FALSE
;
286 enum neon_el_type type
;
290 #define NEON_MAX_TYPE_ELS 4
294 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
301 unsigned long instruction
;
305 /* "uncond_value" is set to the value in place of the conditional field in
306 unconditional versions of the instruction, or -1 if nothing is
309 struct neon_type vectype
;
310 /* Set to the opcode if the instruction needs relaxation.
311 Zero if the instruction is not relaxed. */
315 bfd_reloc_code_real_type type
;
324 struct neon_type_el vectype
;
325 unsigned present
: 1; /* Operand present. */
326 unsigned isreg
: 1; /* Operand was a register. */
327 unsigned immisreg
: 1; /* .imm field is a second register. */
328 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
329 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
330 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
331 instructions. This allows us to disambiguate ARM <-> vector insns. */
332 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
333 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
334 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
335 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
336 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
337 unsigned writeback
: 1; /* Operand has trailing ! */
338 unsigned preind
: 1; /* Preindexed address. */
339 unsigned postind
: 1; /* Postindexed address. */
340 unsigned negative
: 1; /* Index register was negated. */
341 unsigned shifted
: 1; /* Shift applied to operation. */
342 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
346 static struct arm_it inst
;
348 #define NUM_FLOAT_VALS 8
350 const char * fp_const
[] =
352 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
355 /* Number of littlenums required to hold an extended precision number. */
356 #define MAX_LITTLENUMS 6
358 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
368 #define CP_T_X 0x00008000
369 #define CP_T_Y 0x00400000
371 #define CONDS_BIT 0x00100000
372 #define LOAD_BIT 0x00100000
374 #define DOUBLE_LOAD_FLAG 0x00000001
378 const char * template;
382 #define COND_ALWAYS 0xE
386 const char *template;
390 struct asm_barrier_opt
392 const char *template;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)
408 bfd_reloc_code_real_type reloc
;
413 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
414 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
419 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2
426 struct neon_typed_alias
428 unsigned char defined
;
430 struct neon_type_el eltype
;
433 /* ARM register categories. This includes coprocessor numbers and various
434 architecture extensions' registers. */
460 /* Structure for a hash table entry for a register.
461 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
462 information which states whether a vector type or index is specified (for a
463 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
467 unsigned char number
;
469 unsigned char builtin
;
470 struct neon_typed_alias
*neon
;
473 /* Diagnostics used when we don't get a register of the expected type. */
474 const char *const reg_expected_msgs
[] =
476 N_("ARM register expected"),
477 N_("bad or missing co-processor number"),
478 N_("co-processor register expected"),
479 N_("FPA register expected"),
480 N_("VFP single precision register expected"),
481 N_("VFP/Neon double precision register expected"),
482 N_("Neon quad precision register expected"),
483 N_("VFP single or double precision register expected"),
484 N_("Neon double or quad precision register expected"),
485 N_("VFP single, double or Neon quad precision register expected"),
486 N_("VFP system register expected"),
487 N_("Maverick MVF register expected"),
488 N_("Maverick MVD register expected"),
489 N_("Maverick MVFX register expected"),
490 N_("Maverick MVDX register expected"),
491 N_("Maverick MVAX register expected"),
492 N_("Maverick DSPSC register expected"),
493 N_("iWMMXt data register expected"),
494 N_("iWMMXt control register expected"),
495 N_("iWMMXt scalar register expected"),
496 N_("XScale accumulator register expected"),
499 /* Some well known registers that we refer to directly elsewhere. */
504 /* ARM instructions take 4bytes in the object file, Thumb instructions
510 /* Basic string to match. */
511 const char *template;
513 /* Parameters to instruction. */
514 unsigned char operands
[8];
516 /* Conditional tag - see opcode_lookup. */
517 unsigned int tag
: 4;
519 /* Basic instruction code. */
520 unsigned int avalue
: 28;
522 /* Thumb-format instruction code. */
525 /* Which architecture variant provides this instruction. */
526 const arm_feature_set
*avariant
;
527 const arm_feature_set
*tvariant
;
529 /* Function to call to encode instruction in ARM format. */
530 void (* aencode
) (void);
532 /* Function to call to encode instruction in Thumb format. */
533 void (* tencode
) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb data-processing opcodes.  */
#define T_OPCODE_MUL	0x4340
#define T_OPCODE_TST	0x4200
#define T_OPCODE_CMN	0x42c0
#define T_OPCODE_NEG	0x4240
#define T_OPCODE_MVN	0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3	0x1a00
#define T_OPCODE_ADD_HI	0x4400
#define T_OPCODE_ADD_ST	0xb000
#define T_OPCODE_SUB_ST	0xb080
#define T_OPCODE_ADD_SP	0xa800
#define T_OPCODE_ADD_PC	0xa000
#define T_OPCODE_ADD_I8	0x3000
#define T_OPCODE_SUB_I8	0x3800
#define T_OPCODE_ADD_I3	0x1c00
#define T_OPCODE_SUB_I3	0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8	0x2800
#define T_OPCODE_CMP_LR	0x4280
#define T_OPCODE_MOV_HR	0x4600
#define T_OPCODE_CMP_HR	0x4500

/* 16-bit Thumb load/store opcodes.  */
#define T_OPCODE_LDR_PC	0x4800
#define T_OPCODE_LDR_SP	0x9800
#define T_OPCODE_STR_SP	0x9000
#define T_OPCODE_LDR_IW	0x6800
#define T_OPCODE_STR_IW	0x6000
#define T_OPCODE_LDR_IH	0x8800
#define T_OPCODE_STR_IH	0x8000
#define T_OPCODE_LDR_IB	0x7800
#define T_OPCODE_STR_IB	0x7000
#define T_OPCODE_LDR_RW	0x5800
#define T_OPCODE_STR_RW	0x5000
#define T_OPCODE_LDR_RH	0x5a00
#define T_OPCODE_STR_RH	0x5200
#define T_OPCODE_LDR_RB	0x5c00
#define T_OPCODE_STR_RB	0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH	0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Common diagnostic messages.  All expand to a translated string, so they
   must NOT carry a trailing semicolon: each one is used inside expressions
   such as `inst.error = BAD_ADDR_MODE;'.  The stray `;' that used to end
   BAD_ADDR_MODE has been removed for that reason.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
653 static struct hash_control
*arm_ops_hsh
;
654 static struct hash_control
*arm_cond_hsh
;
655 static struct hash_control
*arm_shift_hsh
;
656 static struct hash_control
*arm_psr_hsh
;
657 static struct hash_control
*arm_v7m_psr_hsh
;
658 static struct hash_control
*arm_reg_hsh
;
659 static struct hash_control
*arm_reloc_hsh
;
660 static struct hash_control
*arm_barrier_opt_hsh
;
662 /* Stuff needed to resolve the label ambiguity
672 symbolS
* last_label_seen
;
673 static int label_is_thumb_function_name
= FALSE
;
675 /* Literal pool structure. Held on a per-section
676 and per-sub-section basis. */
678 #define MAX_LITERAL_POOL_SIZE 1024
679 typedef struct literal_pool
681 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
682 unsigned int next_free_entry
;
687 struct literal_pool
* next
;
690 /* Pointer to a linked list of literal pools. */
691 literal_pool
* list_of_pools
= NULL
;
/* State variables for IT block handling.  */
/* NOTE(review): this holds an encoded condition mask (per its name), not a
   truth value, so it is declared `int' rather than the previous
   `bfd_boolean' — confirm against the IT-handling code elsewhere.  */
static int current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
/* Prefix characters that indicate the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
734 skip_past_char (char ** str
, char c
)
744 #define skip_past_comma(str) skip_past_char (str, ',')
746 /* Arithmetic expressions (possibly involving symbols). */
748 /* Return TRUE if anything in the expression is a bignum. */
751 walk_no_bignums (symbolS
* sp
)
753 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
756 if (symbol_get_value_expression (sp
)->X_add_symbol
)
758 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
759 || (symbol_get_value_expression (sp
)->X_op_symbol
760 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
766 static int in_my_get_expression
= 0;
/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX  0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
777 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
782 /* In unified syntax, all prefixes are optional. */
784 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
789 case GE_NO_PREFIX
: break;
791 if (!is_immediate_prefix (**str
))
793 inst
.error
= _("immediate expression requires a # prefix");
799 case GE_OPT_PREFIX_BIG
:
800 if (is_immediate_prefix (**str
))
806 memset (ep
, 0, sizeof (expressionS
));
808 save_in
= input_line_pointer
;
809 input_line_pointer
= *str
;
810 in_my_get_expression
= 1;
811 seg
= expression (ep
);
812 in_my_get_expression
= 0;
814 if (ep
->X_op
== O_illegal
)
816 /* We found a bad expression in md_operand(). */
817 *str
= input_line_pointer
;
818 input_line_pointer
= save_in
;
819 if (inst
.error
== NULL
)
820 inst
.error
= _("bad expression");
825 if (seg
!= absolute_section
826 && seg
!= text_section
827 && seg
!= data_section
828 && seg
!= bss_section
829 && seg
!= undefined_section
)
831 inst
.error
= _("bad segment");
832 *str
= input_line_pointer
;
833 input_line_pointer
= save_in
;
838 /* Get rid of any bignums now, so that we don't generate an error for which
839 we can't establish a line number later on. Big numbers are never valid
840 in instructions, which is where this routine is always called. */
841 if (prefix_mode
!= GE_OPT_PREFIX_BIG
842 && (ep
->X_op
== O_big
844 && (walk_no_bignums (ep
->X_add_symbol
)
846 && walk_no_bignums (ep
->X_op_symbol
))))))
848 inst
.error
= _("invalid constant");
849 *str
= input_line_pointer
;
850 input_line_pointer
= save_in
;
854 *str
= input_line_pointer
;
855 input_line_pointer
= save_in
;
859 /* Turn a string in input_line_pointer into a floating point constant
860 of type TYPE, and store the appropriate bytes in *LITP. The number
861 of LITTLENUMS emitted is stored in *SIZEP. An error message is
862 returned, or NULL on OK.
864 Note that fp constants aren't represent in the normal way on the ARM.
865 In big endian mode, things are as expected. However, in little endian
866 mode fp constants are big-endian word-wise, and little-endian byte-wise
867 within the words. For example, (double) 1.1 in big endian mode is
868 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
869 the byte sequence 99 99 f1 3f 9a 99 99 99.
871 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
874 md_atof (int type
, char * litP
, int * sizeP
)
877 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
909 return _("bad call to MD_ATOF()");
912 t
= atof_ieee (input_line_pointer
, type
, words
);
914 input_line_pointer
= t
;
917 if (target_big_endian
)
919 for (i
= 0; i
< prec
; i
++)
921 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
927 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
928 for (i
= prec
- 1; i
>= 0; i
--)
930 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
934 /* For a 4 byte float the order of elements in `words' is 1 0.
935 For an 8 byte float the order is 1 0 3 2. */
936 for (i
= 0; i
< prec
; i
+= 2)
938 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
939 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
947 /* We handle all bad expressions here, so that we can report the faulty
948 instruction in the error message. */
950 md_operand (expressionS
* expr
)
952 if (in_my_get_expression
)
953 expr
->X_op
= O_illegal
;
956 /* Immediate values. */
958 /* Generic immediate-value read function for use in directives.
959 Accepts anything that 'expression' can fold to a constant.
960 *val receives the number. */
963 immediate_for_directive (int *val
)
966 exp
.X_op
= O_illegal
;
968 if (is_immediate_prefix (*input_line_pointer
))
970 input_line_pointer
++;
974 if (exp
.X_op
!= O_constant
)
976 as_bad (_("expected #constant"));
977 ignore_rest_of_line ();
980 *val
= exp
.X_add_number
;
985 /* Register parsing. */
987 /* Generic register parser. CCP points to what should be the
988 beginning of a register name. If it is indeed a valid register
989 name, advance CCP over it and return the reg_entry structure;
990 otherwise return NULL. Does not issue diagnostics. */
992 static struct reg_entry
*
993 arm_reg_parse_multi (char **ccp
)
997 struct reg_entry
*reg
;
999 #ifdef REGISTER_PREFIX
1000 if (*start
!= REGISTER_PREFIX
)
1004 #ifdef OPTIONAL_REGISTER_PREFIX
1005 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1010 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1015 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1017 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1027 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1028 enum arm_reg_type type
)
1030 /* Alternative syntaxes are accepted for a few register classes. */
1037 /* Generic coprocessor register names are allowed for these. */
1038 if (reg
&& reg
->type
== REG_TYPE_CN
)
1043 /* For backward compatibility, a bare number is valid here. */
1045 unsigned long processor
= strtoul (start
, ccp
, 10);
1046 if (*ccp
!= start
&& processor
<= 15)
1050 case REG_TYPE_MMXWC
:
1051 /* WC includes WCG. ??? I'm not sure this is true for all
1052 instructions that take WC registers. */
1053 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1064 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1065 return value is the register number or FAIL. */
1068 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1071 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1074 /* Do not allow a scalar (reg+index) to parse as a register. */
1075 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1078 if (reg
&& reg
->type
== type
)
1081 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1088 /* Parse a Neon type specifier. *STR should point at the leading '.'
1089 character. Does no verification at this stage that the type fits the opcode
1096 Can all be legally parsed by this function.
1098 Fills in neon_type struct pointer with parsed information, and updates STR
1099 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1100 type, FAIL if not. */
1103 parse_neon_type (struct neon_type
*type
, char **str
)
1110 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1112 enum neon_el_type thistype
= NT_untyped
;
1113 unsigned thissize
= -1u;
1120 /* Just a size without an explicit type. */
1124 switch (TOLOWER (*ptr
))
1126 case 'i': thistype
= NT_integer
; break;
1127 case 'f': thistype
= NT_float
; break;
1128 case 'p': thistype
= NT_poly
; break;
1129 case 's': thistype
= NT_signed
; break;
1130 case 'u': thistype
= NT_unsigned
; break;
1132 thistype
= NT_float
;
1137 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1143 /* .f is an abbreviation for .f32. */
1144 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1149 thissize
= strtoul (ptr
, &ptr
, 10);
1151 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1154 as_bad (_("bad size %d in type specifier"), thissize
);
1162 type
->el
[type
->elems
].type
= thistype
;
1163 type
->el
[type
->elems
].size
= thissize
;
1168 /* Empty/missing type is not a successful parse. */
1169 if (type
->elems
== 0)
1177 /* Errors may be set multiple times during parsing or bit encoding
1178 (particularly in the Neon bits), but usually the earliest error which is set
1179 will be the most meaningful. Avoid overwriting it with later (cascading)
1180 errors by calling this function. */
1183 first_error (const char *err
)
1189 /* Parse a single type, e.g. ".s32", leading period included. */
1191 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1194 struct neon_type optype
;
1198 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1200 if (optype
.elems
== 1)
1201 *vectype
= optype
.el
[0];
1204 first_error (_("only one type should be specified for operand"));
1210 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit
   into the 4-bit scalar index field.  */
#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
1228 /* Parse either a register or a scalar, with an optional type. Return the
1229 register number, and optionally fill in the actual type of the register
1230 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1231 type/index information in *TYPEINFO. */
1234 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1235 enum arm_reg_type
*rtype
,
1236 struct neon_typed_alias
*typeinfo
)
1239 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1240 struct neon_typed_alias atype
;
1241 struct neon_type_el parsetype
;
1245 atype
.eltype
.type
= NT_invtype
;
1246 atype
.eltype
.size
= -1;
1248 /* Try alternate syntax for some types of register. Note these are mutually
1249 exclusive with the Neon syntax extensions. */
1252 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1260 /* Undo polymorphism when a set of register types may be accepted. */
1261 if ((type
== REG_TYPE_NDQ
1262 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1263 || (type
== REG_TYPE_VFSD
1264 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1265 || (type
== REG_TYPE_NSDQ
1266 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1267 || reg
->type
== REG_TYPE_NQ
))
1268 || (type
== REG_TYPE_MMXWC
1269 && (reg
->type
== REG_TYPE_MMXWCG
)))
1272 if (type
!= reg
->type
)
1278 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1280 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1282 first_error (_("can't redefine type for operand"));
1285 atype
.defined
|= NTA_HASTYPE
;
1286 atype
.eltype
= parsetype
;
1289 if (skip_past_char (&str
, '[') == SUCCESS
)
1291 if (type
!= REG_TYPE_VFD
)
1293 first_error (_("only D registers may be indexed"));
1297 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1299 first_error (_("can't change index for operand"));
1303 atype
.defined
|= NTA_HASINDEX
;
1305 if (skip_past_char (&str
, ']') == SUCCESS
)
1306 atype
.index
= NEON_ALL_LANES
;
1311 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1313 if (exp
.X_op
!= O_constant
)
1315 first_error (_("constant expression required"));
1319 if (skip_past_char (&str
, ']') == FAIL
)
1322 atype
.index
= exp
.X_add_number
;
1337 /* Like arm_reg_parse, but allow allow the following extra features:
1338 - If RTYPE is non-zero, return the (possibly restricted) type of the
1339 register (e.g. Neon double or quad reg when either has been requested).
1340 - If this is a Neon vector type with additional type information, fill
1341 in the struct pointed to by VECTYPE (if non-NULL).
1342 This function will fault on encountering a scalar.
1346 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1347 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1349 struct neon_typed_alias atype
;
1351 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1356 /* Do not allow a scalar (reg+index) to parse as a register. */
1357 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1359 first_error (_("register operand expected, but got scalar"));
1364 *vectype
= atype
.eltype
;
1371 #define NEON_SCALAR_REG(X) ((X) >> 4)
1372 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1374 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1375 have enough information to be able to do a good job bounds-checking. So, we
1376 just do easy checks here, and do further checks later. */
1379 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1383 struct neon_typed_alias atype
;
1385 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1387 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1390 if (atype
.index
== NEON_ALL_LANES
)
1392 first_error (_("scalar must have an index"));
1395 else if (atype
.index
>= 64 / elsize
)
1397 first_error (_("scalar index out of range"));
1402 *type
= atype
.eltype
;
1406 return reg
* 16 + atype
.index
;
1409 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1411 parse_reg_list (char ** strp
)
1413 char * str
= * strp
;
1417 /* We come back here if we get ranges concatenated by '+' or '|'. */
1432 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1434 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1444 first_error (_("bad range in register list"));
1448 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1450 if (range
& (1 << i
))
1452 (_("Warning: duplicated register (r%d) in register list"),
1460 if (range
& (1 << reg
))
1461 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1463 else if (reg
<= cur_reg
)
1464 as_tsktsk (_("Warning: register range not in ascending order"));
1469 while (skip_past_comma (&str
) != FAIL
1470 || (in_range
= 1, *str
++ == '-'));
1475 first_error (_("missing `}'"));
1483 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1486 if (expr
.X_op
== O_constant
)
1488 if (expr
.X_add_number
1489 != (expr
.X_add_number
& 0x0000ffff))
1491 inst
.error
= _("invalid register mask");
1495 if ((range
& expr
.X_add_number
) != 0)
1497 int regno
= range
& expr
.X_add_number
;
1500 regno
= (1 << regno
) - 1;
1502 (_("Warning: duplicated register (r%d) in register list"),
1506 range
|= expr
.X_add_number
;
1510 if (inst
.reloc
.type
!= 0)
1512 inst
.error
= _("expression too complex");
1516 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1517 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1518 inst
.reloc
.pc_rel
= 0;
1522 if (*str
== '|' || *str
== '+')
1528 while (another_range
);
1534 /* Types of registers in a list. */
1543 /* Parse a VFP register list. If the string is invalid return FAIL.
1544 Otherwise return the number of registers, and set PBASE to the first
1545 register. Parses registers of type ETYPE.
1546 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1547 - Q registers can be used to specify pairs of D registers
1548 - { } can be omitted from around a singleton register list
1549 FIXME: This is not implemented, as it would require backtracking in
1552 This could be done (the meaning isn't really ambiguous), but doesn't
1553 fit in well with the current parsing framework.
1554 - 32 D registers may be used (also true for VFPv3).
1555 FIXME: Types are ignored in these register lists, which is probably a
1559 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1564 enum arm_reg_type regtype
= 0;
1568 unsigned long mask
= 0;
1573 inst
.error
= _("expecting {");
1582 regtype
= REG_TYPE_VFS
;
1587 regtype
= REG_TYPE_VFD
;
1590 case REGLIST_NEON_D
:
1591 regtype
= REG_TYPE_NDQ
;
1595 if (etype
!= REGLIST_VFP_S
)
1597 /* VFPv3 allows 32 D registers. */
1598 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1602 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1605 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1612 base_reg
= max_regs
;
1616 int setmask
= 1, addregs
= 1;
1618 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1620 if (new_base
== FAIL
)
1622 first_error (_(reg_expected_msgs
[regtype
]));
1626 if (new_base
>= max_regs
)
1628 first_error (_("register out of range in list"));
1632 /* Note: a value of 2 * n is returned for the register Q<n>. */
1633 if (regtype
== REG_TYPE_NQ
)
1639 if (new_base
< base_reg
)
1640 base_reg
= new_base
;
1642 if (mask
& (setmask
<< new_base
))
1644 first_error (_("invalid register list"));
1648 if ((mask
>> new_base
) != 0 && ! warned
)
1650 as_tsktsk (_("register list not in ascending order"));
1654 mask
|= setmask
<< new_base
;
1657 if (*str
== '-') /* We have the start of a range expression */
1663 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1666 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1670 if (high_range
>= max_regs
)
1672 first_error (_("register out of range in list"));
1676 if (regtype
== REG_TYPE_NQ
)
1677 high_range
= high_range
+ 1;
1679 if (high_range
<= new_base
)
1681 inst
.error
= _("register range not in ascending order");
1685 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1687 if (mask
& (setmask
<< new_base
))
1689 inst
.error
= _("invalid register list");
1693 mask
|= setmask
<< new_base
;
1698 while (skip_past_comma (&str
) != FAIL
);
1702 /* Sanity check -- should have raised a parse error above. */
1703 if (count
== 0 || count
> max_regs
)
1708 /* Final test -- the registers must be consecutive. */
1710 for (i
= 0; i
< count
; i
++)
1712 if ((mask
& (1u << i
)) == 0)
1714 inst
.error
= _("non-contiguous register range");
1724 /* True if two alias types are the same. */
1727 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1735 if (a
->defined
!= b
->defined
)
1738 if ((a
->defined
& NTA_HASTYPE
) != 0
1739 && (a
->eltype
.type
!= b
->eltype
.type
1740 || a
->eltype
.size
!= b
->eltype
.size
))
1743 if ((a
->defined
& NTA_HASINDEX
) != 0
1744 && (a
->index
!= b
->index
))
1750 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1751 The base register is put in *PBASE.
1752 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1754 The register stride (minus one) is put in bit 4 of the return value.
1755 Bits [6:5] encode the list length (minus one).
1756 The type of the list elements is put in *ELTYPE, if non-NULL. */
1758 #define NEON_LANE(X) ((X) & 0xf)
1759 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1760 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1763 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1764 struct neon_type_el
*eltype
)
1771 int leading_brace
= 0;
1772 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1774 const char *const incr_error
= "register stride must be 1 or 2";
1775 const char *const type_error
= "mismatched element/structure types in list";
1776 struct neon_typed_alias firsttype
;
1778 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1783 struct neon_typed_alias atype
;
1784 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1788 first_error (_(reg_expected_msgs
[rtype
]));
1795 if (rtype
== REG_TYPE_NQ
)
1802 else if (reg_incr
== -1)
1804 reg_incr
= getreg
- base_reg
;
1805 if (reg_incr
< 1 || reg_incr
> 2)
1807 first_error (_(incr_error
));
1811 else if (getreg
!= base_reg
+ reg_incr
* count
)
1813 first_error (_(incr_error
));
1817 if (!neon_alias_types_same (&atype
, &firsttype
))
1819 first_error (_(type_error
));
1823 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1827 struct neon_typed_alias htype
;
1828 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1830 lane
= NEON_INTERLEAVE_LANES
;
1831 else if (lane
!= NEON_INTERLEAVE_LANES
)
1833 first_error (_(type_error
));
1838 else if (reg_incr
!= 1)
1840 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1844 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1847 first_error (_(reg_expected_msgs
[rtype
]));
1850 if (!neon_alias_types_same (&htype
, &firsttype
))
1852 first_error (_(type_error
));
1855 count
+= hireg
+ dregs
- getreg
;
1859 /* If we're using Q registers, we can't use [] or [n] syntax. */
1860 if (rtype
== REG_TYPE_NQ
)
1866 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1870 else if (lane
!= atype
.index
)
1872 first_error (_(type_error
));
1876 else if (lane
== -1)
1877 lane
= NEON_INTERLEAVE_LANES
;
1878 else if (lane
!= NEON_INTERLEAVE_LANES
)
1880 first_error (_(type_error
));
1885 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1887 /* No lane set by [x]. We must be interleaving structures. */
1889 lane
= NEON_INTERLEAVE_LANES
;
1892 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1893 || (count
> 1 && reg_incr
== -1))
1895 first_error (_("error parsing element/structure list"));
1899 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1901 first_error (_("expected }"));
1909 *eltype
= firsttype
.eltype
;
1914 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1917 /* Parse an explicit relocation suffix on an expression. This is
1918 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1919 arm_reloc_hsh contains no entries, so this function can only
1920 succeed if there is no () after the word. Returns -1 on error,
1921 BFD_RELOC_UNUSED if there wasn't any suffix. */
1923 parse_reloc (char **str
)
1925 struct reloc_entry
*r
;
1929 return BFD_RELOC_UNUSED
;
1934 while (*q
&& *q
!= ')' && *q
!= ',')
1939 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1946 /* Directives: register aliases. */
1948 static struct reg_entry
*
1949 insert_reg_alias (char *str
, int number
, int type
)
1951 struct reg_entry
*new;
1954 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1957 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1959 /* Only warn about a redefinition if it's not defined as the
1961 else if (new->number
!= number
|| new->type
!= type
)
1962 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1967 name
= xstrdup (str
);
1968 new = xmalloc (sizeof (struct reg_entry
));
1971 new->number
= number
;
1973 new->builtin
= FALSE
;
1976 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1983 insert_neon_reg_alias (char *str
, int number
, int type
,
1984 struct neon_typed_alias
*atype
)
1986 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1990 first_error (_("attempt to redefine typed alias"));
1996 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
1997 *reg
->neon
= *atype
;
2001 /* Look for the .req directive. This is of the form:
2003 new_register_name .req existing_register_name
2005 If we find one, or if it looks sufficiently like one that we want to
2006 handle any error here, return non-zero. Otherwise return zero. */
2009 create_register_alias (char * newname
, char *p
)
2011 struct reg_entry
*old
;
2012 char *oldname
, *nbuf
;
2015 /* The input scrubber ensures that whitespace after the mnemonic is
2016 collapsed to single spaces. */
2018 if (strncmp (oldname
, " .req ", 6) != 0)
2022 if (*oldname
== '\0')
2025 old
= hash_find (arm_reg_hsh
, oldname
);
2028 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2032 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2033 the desired alias name, and p points to its end. If not, then
2034 the desired alias name is in the global original_case_string. */
2035 #ifdef TC_CASE_SENSITIVE
2038 newname
= original_case_string
;
2039 nlen
= strlen (newname
);
2042 nbuf
= alloca (nlen
+ 1);
2043 memcpy (nbuf
, newname
, nlen
);
2046 /* Create aliases under the new name as stated; an all-lowercase
2047 version of the new name; and an all-uppercase version of the new
2049 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2051 for (p
= nbuf
; *p
; p
++)
2054 if (strncmp (nbuf
, newname
, nlen
))
2055 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2057 for (p
= nbuf
; *p
; p
++)
2060 if (strncmp (nbuf
, newname
, nlen
))
2061 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2066 /* Create a Neon typed/indexed register alias using directives, e.g.:
2071 These typed registers can be used instead of the types specified after the
2072 Neon mnemonic, so long as all operands given have types. Types can also be
2073 specified directly, e.g.:
2074 vadd d0.s32, d1.s32, d2.s32
2078 create_neon_reg_alias (char *newname
, char *p
)
2080 enum arm_reg_type basetype
;
2081 struct reg_entry
*basereg
;
2082 struct reg_entry mybasereg
;
2083 struct neon_type ntype
;
2084 struct neon_typed_alias typeinfo
;
2085 char *namebuf
, *nameend
;
2088 typeinfo
.defined
= 0;
2089 typeinfo
.eltype
.type
= NT_invtype
;
2090 typeinfo
.eltype
.size
= -1;
2091 typeinfo
.index
= -1;
2095 if (strncmp (p
, " .dn ", 5) == 0)
2096 basetype
= REG_TYPE_VFD
;
2097 else if (strncmp (p
, " .qn ", 5) == 0)
2098 basetype
= REG_TYPE_NQ
;
2107 basereg
= arm_reg_parse_multi (&p
);
2109 if (basereg
&& basereg
->type
!= basetype
)
2111 as_bad (_("bad type for register"));
2115 if (basereg
== NULL
)
2118 /* Try parsing as an integer. */
2119 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2120 if (exp
.X_op
!= O_constant
)
2122 as_bad (_("expression must be constant"));
2125 basereg
= &mybasereg
;
2126 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2132 typeinfo
= *basereg
->neon
;
2134 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2136 /* We got a type. */
2137 if (typeinfo
.defined
& NTA_HASTYPE
)
2139 as_bad (_("can't redefine the type of a register alias"));
2143 typeinfo
.defined
|= NTA_HASTYPE
;
2144 if (ntype
.elems
!= 1)
2146 as_bad (_("you must specify a single type only"));
2149 typeinfo
.eltype
= ntype
.el
[0];
2152 if (skip_past_char (&p
, '[') == SUCCESS
)
2155 /* We got a scalar index. */
2157 if (typeinfo
.defined
& NTA_HASINDEX
)
2159 as_bad (_("can't redefine the index of a scalar alias"));
2163 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2165 if (exp
.X_op
!= O_constant
)
2167 as_bad (_("scalar index must be constant"));
2171 typeinfo
.defined
|= NTA_HASINDEX
;
2172 typeinfo
.index
= exp
.X_add_number
;
2174 if (skip_past_char (&p
, ']') == FAIL
)
2176 as_bad (_("expecting ]"));
2181 namelen
= nameend
- newname
;
2182 namebuf
= alloca (namelen
+ 1);
2183 strncpy (namebuf
, newname
, namelen
);
2184 namebuf
[namelen
] = '\0';
2186 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2187 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2189 /* Insert name in all uppercase. */
2190 for (p
= namebuf
; *p
; p
++)
2193 if (strncmp (namebuf
, newname
, namelen
))
2194 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2195 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2197 /* Insert name in all lowercase. */
2198 for (p
= namebuf
; *p
; p
++)
2201 if (strncmp (namebuf
, newname
, namelen
))
2202 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2203 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2208 /* Should never be called, as .req goes between the alias and the
2209 register name, not at the beginning of the line. */
2211 s_req (int a ATTRIBUTE_UNUSED
)
2213 as_bad (_("invalid syntax for .req directive"));
2217 s_dn (int a ATTRIBUTE_UNUSED
)
2219 as_bad (_("invalid syntax for .dn directive"));
2223 s_qn (int a ATTRIBUTE_UNUSED
)
2225 as_bad (_("invalid syntax for .qn directive"));
2228 /* The .unreq directive deletes an alias which was previously defined
2229 by .req. For example:
2235 s_unreq (int a ATTRIBUTE_UNUSED
)
2240 name
= input_line_pointer
;
2242 while (*input_line_pointer
!= 0
2243 && *input_line_pointer
!= ' '
2244 && *input_line_pointer
!= '\n')
2245 ++input_line_pointer
;
2247 saved_char
= *input_line_pointer
;
2248 *input_line_pointer
= 0;
2251 as_bad (_("invalid syntax for .unreq directive"));
2254 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2257 as_bad (_("unknown register alias '%s'"), name
);
2258 else if (reg
->builtin
)
2259 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2263 hash_delete (arm_reg_hsh
, name
);
2264 free ((char *) reg
->name
);
2271 *input_line_pointer
= saved_char
;
2272 demand_empty_rest_of_line ();
2275 /* Directives: Instruction set selection. */
2278 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2279 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2280 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2281 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2283 static enum mstate mapstate
= MAP_UNDEFINED
;
2286 mapping_state (enum mstate state
)
2289 const char * symname
;
2292 if (mapstate
== state
)
2293 /* The mapping symbol has already been emitted.
2294 There is nothing else to do. */
2303 type
= BSF_NO_FLAGS
;
2307 type
= BSF_NO_FLAGS
;
2311 type
= BSF_NO_FLAGS
;
2319 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2321 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2322 symbol_table_insert (symbolP
);
2323 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2328 THUMB_SET_FUNC (symbolP
, 0);
2329 ARM_SET_THUMB (symbolP
, 0);
2330 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2334 THUMB_SET_FUNC (symbolP
, 1);
2335 ARM_SET_THUMB (symbolP
, 1);
2336 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2345 #define mapping_state(x) /* nothing */
2348 /* Find the real, Thumb encoded start of a Thumb function. */
2351 find_real_start (symbolS
* symbolP
)
2354 const char * name
= S_GET_NAME (symbolP
);
2355 symbolS
* new_target
;
2357 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2358 #define STUB_NAME ".real_start_of"
2363 /* The compiler may generate BL instructions to local labels because
2364 it needs to perform a branch to a far away location. These labels
2365 do not have a corresponding ".real_start_of" label. We check
2366 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2367 the ".real_start_of" convention for nonlocal branches. */
2368 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2371 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2372 new_target
= symbol_find (real_start
);
2374 if (new_target
== NULL
)
2376 as_warn ("Failed to find real start of function: %s\n", name
);
2377 new_target
= symbolP
;
2384 opcode_select (int width
)
2391 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2392 as_bad (_("selected processor does not support THUMB opcodes"));
2395 /* No need to force the alignment, since we will have been
2396 coming from ARM mode, which is word-aligned. */
2397 record_alignment (now_seg
, 1);
2399 mapping_state (MAP_THUMB
);
2405 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2406 as_bad (_("selected processor does not support ARM opcodes"));
2411 frag_align (2, 0, 0);
2413 record_alignment (now_seg
, 1);
2415 mapping_state (MAP_ARM
);
2419 as_bad (_("invalid instruction size selected (%d)"), width
);
2424 s_arm (int ignore ATTRIBUTE_UNUSED
)
2427 demand_empty_rest_of_line ();
2431 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2434 demand_empty_rest_of_line ();
2438 s_code (int unused ATTRIBUTE_UNUSED
)
2442 temp
= get_absolute_expression ();
2447 opcode_select (temp
);
2451 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2456 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2458 /* If we are not already in thumb mode go into it, EVEN if
2459 the target processor does not support thumb instructions.
2460 This is used by gcc/config/arm/lib1funcs.asm for example
2461 to compile interworking support functions even if the
2462 target processor should not support interworking. */
2466 record_alignment (now_seg
, 1);
2469 demand_empty_rest_of_line ();
2473 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2477 /* The following label is the name/address of the start of a Thumb function.
2478 We need to know this for the interworking support. */
2479 label_is_thumb_function_name
= TRUE
;
2482 /* Perform a .set directive, but also mark the alias as
2483 being a thumb function. */
2486 s_thumb_set (int equiv
)
2488 /* XXX the following is a duplicate of the code for s_set() in read.c
2489 We cannot just call that code as we need to get at the symbol that
2496 /* Especial apologies for the random logic:
2497 This just grew, and could be parsed much more simply!
2499 name
= input_line_pointer
;
2500 delim
= get_symbol_end ();
2501 end_name
= input_line_pointer
;
2504 if (*input_line_pointer
!= ',')
2507 as_bad (_("expected comma after name \"%s\""), name
);
2509 ignore_rest_of_line ();
2513 input_line_pointer
++;
2516 if (name
[0] == '.' && name
[1] == '\0')
2518 /* XXX - this should not happen to .thumb_set. */
2522 if ((symbolP
= symbol_find (name
)) == NULL
2523 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2526 /* When doing symbol listings, play games with dummy fragments living
2527 outside the normal fragment chain to record the file and line info
2529 if (listing
& LISTING_SYMBOLS
)
2531 extern struct list_info_struct
* listing_tail
;
2532 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2534 memset (dummy_frag
, 0, sizeof (fragS
));
2535 dummy_frag
->fr_type
= rs_fill
;
2536 dummy_frag
->line
= listing_tail
;
2537 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2538 dummy_frag
->fr_symbol
= symbolP
;
2542 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2545 /* "set" symbols are local unless otherwise specified. */
2546 SF_SET_LOCAL (symbolP
);
2547 #endif /* OBJ_COFF */
2548 } /* Make a new symbol. */
2550 symbol_table_insert (symbolP
);
2555 && S_IS_DEFINED (symbolP
)
2556 && S_GET_SEGMENT (symbolP
) != reg_section
)
2557 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2559 pseudo_set (symbolP
);
2561 demand_empty_rest_of_line ();
2563 /* XXX Now we come to the Thumb specific bit of code. */
2565 THUMB_SET_FUNC (symbolP
, 1);
2566 ARM_SET_THUMB (symbolP
, 1);
2567 #if defined OBJ_ELF || defined OBJ_COFF
2568 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2572 /* Directives: Mode selection. */
2574 /* .syntax [unified|divided] - choose the new unified syntax
2575 (same for Arm and Thumb encoding, modulo slight differences in what
2576 can be represented) or the old divergent syntax for each mode. */
2578 s_syntax (int unused ATTRIBUTE_UNUSED
)
2582 name
= input_line_pointer
;
2583 delim
= get_symbol_end ();
2585 if (!strcasecmp (name
, "unified"))
2586 unified_syntax
= TRUE
;
2587 else if (!strcasecmp (name
, "divided"))
2588 unified_syntax
= FALSE
;
2591 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2594 *input_line_pointer
= delim
;
2595 demand_empty_rest_of_line ();
2598 /* Directives: sectioning and alignment. */
2600 /* Same as s_align_ptwo but align 0 => align 2. */
2603 s_align (int unused ATTRIBUTE_UNUSED
)
2607 long max_alignment
= 15;
2609 temp
= get_absolute_expression ();
2610 if (temp
> max_alignment
)
2611 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2614 as_bad (_("alignment negative. 0 assumed."));
2618 if (*input_line_pointer
== ',')
2620 input_line_pointer
++;
2621 temp_fill
= get_absolute_expression ();
2629 /* Only make a frag if we HAVE to. */
2630 if (temp
&& !need_pass_2
)
2631 frag_align (temp
, (int) temp_fill
, 0);
2632 demand_empty_rest_of_line ();
2634 record_alignment (now_seg
, temp
);
2638 s_bss (int ignore ATTRIBUTE_UNUSED
)
2640 /* We don't support putting frags in the BSS segment, we fake it by
2641 marking in_bss, then looking at s_skip for clues. */
2642 subseg_set (bss_section
, 0);
2643 demand_empty_rest_of_line ();
2644 mapping_state (MAP_DATA
);
2648 s_even (int ignore ATTRIBUTE_UNUSED
)
2650 /* Never make frag if expect extra pass. */
2652 frag_align (1, 0, 0);
2654 record_alignment (now_seg
, 1);
2656 demand_empty_rest_of_line ();
2659 /* Directives: Literal pools. */
2661 static literal_pool
*
2662 find_literal_pool (void)
2664 literal_pool
* pool
;
2666 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2668 if (pool
->section
== now_seg
2669 && pool
->sub_section
== now_subseg
)
2676 static literal_pool
*
2677 find_or_make_literal_pool (void)
2679 /* Next literal pool ID number. */
2680 static unsigned int latest_pool_num
= 1;
2681 literal_pool
* pool
;
2683 pool
= find_literal_pool ();
2687 /* Create a new pool. */
2688 pool
= xmalloc (sizeof (* pool
));
2692 pool
->next_free_entry
= 0;
2693 pool
->section
= now_seg
;
2694 pool
->sub_section
= now_subseg
;
2695 pool
->next
= list_of_pools
;
2696 pool
->symbol
= NULL
;
2698 /* Add it to the list. */
2699 list_of_pools
= pool
;
2702 /* New pools, and emptied pools, will have a NULL symbol. */
2703 if (pool
->symbol
== NULL
)
2705 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2706 (valueT
) 0, &zero_address_frag
);
2707 pool
->id
= latest_pool_num
++;
2714 /* Add the literal in the global 'inst'
2715 structure to the relevent literal pool. */
2718 add_to_lit_pool (void)
2720 literal_pool
* pool
;
2723 pool
= find_or_make_literal_pool ();
2725 /* Check if this literal value is already in the pool. */
2726 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2728 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2729 && (inst
.reloc
.exp
.X_op
== O_constant
)
2730 && (pool
->literals
[entry
].X_add_number
2731 == inst
.reloc
.exp
.X_add_number
)
2732 && (pool
->literals
[entry
].X_unsigned
2733 == inst
.reloc
.exp
.X_unsigned
))
2736 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2737 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2738 && (pool
->literals
[entry
].X_add_number
2739 == inst
.reloc
.exp
.X_add_number
)
2740 && (pool
->literals
[entry
].X_add_symbol
2741 == inst
.reloc
.exp
.X_add_symbol
)
2742 && (pool
->literals
[entry
].X_op_symbol
2743 == inst
.reloc
.exp
.X_op_symbol
))
2747 /* Do we need to create a new entry? */
2748 if (entry
== pool
->next_free_entry
)
2750 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2752 inst
.error
= _("literal pool overflow");
2756 pool
->literals
[entry
] = inst
.reloc
.exp
;
2757 pool
->next_free_entry
+= 1;
2760 inst
.reloc
.exp
.X_op
= O_symbol
;
2761 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
2762 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
2767 /* Can't use symbol_new here, so have to create a symbol and then at
2768 a later date assign it a value. Thats what these functions do. */
2771 symbol_locate (symbolS
* symbolP
,
2772 const char * name
, /* It is copied, the caller can modify. */
2773 segT segment
, /* Segment identifier (SEG_<something>). */
2774 valueT valu
, /* Symbol value. */
2775 fragS
* frag
) /* Associated fragment. */
2777 unsigned int name_length
;
2778 char * preserved_copy_of_name
;
2780 name_length
= strlen (name
) + 1; /* +1 for \0. */
2781 obstack_grow (¬es
, name
, name_length
);
2782 preserved_copy_of_name
= obstack_finish (¬es
);
2784 #ifdef tc_canonicalize_symbol_name
2785 preserved_copy_of_name
=
2786 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2789 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2791 S_SET_SEGMENT (symbolP
, segment
);
2792 S_SET_VALUE (symbolP
, valu
);
2793 symbol_clear_list_pointers (symbolP
);
2795 symbol_set_frag (symbolP
, frag
);
2797 /* Link to end of symbol chain. */
2799 extern int symbol_table_frozen
;
2801 if (symbol_table_frozen
)
2805 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2807 obj_symbol_new_hook (symbolP
);
2809 #ifdef tc_symbol_new_hook
2810 tc_symbol_new_hook (symbolP
);
2814 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2815 #endif /* DEBUG_SYMS */
2820 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2823 literal_pool
* pool
;
2826 pool
= find_literal_pool ();
2828 || pool
->symbol
== NULL
2829 || pool
->next_free_entry
== 0)
2832 mapping_state (MAP_DATA
);
2834 /* Align pool as you have word accesses.
2835 Only make a frag if we have to. */
2837 frag_align (2, 0, 0);
2839 record_alignment (now_seg
, 2);
2841 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2843 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2844 (valueT
) frag_now_fix (), frag_now
);
2845 symbol_table_insert (pool
->symbol
);
2847 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2849 #if defined OBJ_COFF || defined OBJ_ELF
2850 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2853 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2854 /* First output the expression in the instruction to the pool. */
2855 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2857 /* Mark the pool as empty. */
2858 pool
->next_free_entry
= 0;
2859 pool
->symbol
= NULL
;
2863 /* Forward declarations for functions below, in the MD interface
2865 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2866 static valueT
create_unwind_entry (int);
2867 static void start_unwind_section (const segT
, int);
2868 static void add_unwind_opcode (valueT
, int);
2869 static void flush_pending_unwind (void);
2871 /* Directives: Data. */
2874 s_arm_elf_cons (int nbytes
)
2878 #ifdef md_flush_pending_output
2879 md_flush_pending_output ();
2882 if (is_it_end_of_statement ())
2884 demand_empty_rest_of_line ();
2888 #ifdef md_cons_align
2889 md_cons_align (nbytes
);
2892 mapping_state (MAP_DATA
);
2896 char *base
= input_line_pointer
;
2900 if (exp
.X_op
!= O_symbol
)
2901 emit_expr (&exp
, (unsigned int) nbytes
);
2904 char *before_reloc
= input_line_pointer
;
2905 reloc
= parse_reloc (&input_line_pointer
);
2908 as_bad (_("unrecognized relocation suffix"));
2909 ignore_rest_of_line ();
2912 else if (reloc
== BFD_RELOC_UNUSED
)
2913 emit_expr (&exp
, (unsigned int) nbytes
);
2916 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2917 int size
= bfd_get_reloc_size (howto
);
2919 if (reloc
== BFD_RELOC_ARM_PLT32
)
2921 as_bad (_("(plt) is only valid on branch targets"));
2922 reloc
= BFD_RELOC_UNUSED
;
2927 as_bad (_("%s relocations do not fit in %d bytes"),
2928 howto
->name
, nbytes
);
2931 /* We've parsed an expression stopping at O_symbol.
2932 But there may be more expression left now that we
2933 have parsed the relocation marker. Parse it again.
2934 XXX Surely there is a cleaner way to do this. */
2935 char *p
= input_line_pointer
;
2937 char *save_buf
= alloca (input_line_pointer
- base
);
2938 memcpy (save_buf
, base
, input_line_pointer
- base
);
2939 memmove (base
+ (input_line_pointer
- before_reloc
),
2940 base
, before_reloc
- base
);
2942 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2944 memcpy (base
, save_buf
, p
- base
);
2946 offset
= nbytes
- size
;
2947 p
= frag_more ((int) nbytes
);
2948 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2949 size
, &exp
, 0, reloc
);
2954 while (*input_line_pointer
++ == ',');
2956 /* Put terminator back into stream. */
2957 input_line_pointer
--;
2958 demand_empty_rest_of_line ();
2962 /* Parse a .rel31 directive. */
2965 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2972 if (*input_line_pointer
== '1')
2973 highbit
= 0x80000000;
2974 else if (*input_line_pointer
!= '0')
2975 as_bad (_("expected 0 or 1"));
2977 input_line_pointer
++;
2978 if (*input_line_pointer
!= ',')
2979 as_bad (_("missing comma"));
2980 input_line_pointer
++;
2982 #ifdef md_flush_pending_output
2983 md_flush_pending_output ();
2986 #ifdef md_cons_align
2990 mapping_state (MAP_DATA
);
2995 md_number_to_chars (p
, highbit
, 4);
2996 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
2997 BFD_RELOC_ARM_PREL31
);
2999 demand_empty_rest_of_line ();
3002 /* Directives: AEABI stack-unwind tables. */
3004 /* Parse an unwind_fnstart directive. Simply records the current location. */
3007 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3009 demand_empty_rest_of_line ();
3010 /* Mark the start of the function. */
3011 unwind
.proc_start
= expr_build_dot ();
3013 /* Reset the rest of the unwind info. */
3014 unwind
.opcode_count
= 0;
3015 unwind
.table_entry
= NULL
;
3016 unwind
.personality_routine
= NULL
;
3017 unwind
.personality_index
= -1;
3018 unwind
.frame_size
= 0;
3019 unwind
.fp_offset
= 0;
3022 unwind
.sp_restored
= 0;
3026 /* Parse a handlerdata directive. Creates the exception handling table entry
3027 for the function. */
3030 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3032 demand_empty_rest_of_line ();
3033 if (unwind
.table_entry
)
3034 as_bad (_("dupicate .handlerdata directive"));
3036 create_unwind_entry (1);
3039 /* Parse an unwind_fnend directive. Generates the index table entry. */
3042 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3048 demand_empty_rest_of_line ();
3050 /* Add eh table entry. */
3051 if (unwind
.table_entry
== NULL
)
3052 val
= create_unwind_entry (0);
3056 /* Add index table entry. This is two words. */
3057 start_unwind_section (unwind
.saved_seg
, 1);
3058 frag_align (2, 0, 0);
3059 record_alignment (now_seg
, 2);
3061 ptr
= frag_more (8);
3062 where
= frag_now_fix () - 8;
3064 /* Self relative offset of the function start. */
3065 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3066 BFD_RELOC_ARM_PREL31
);
3068 /* Indicate dependency on EHABI-defined personality routines to the
3069 linker, if it hasn't been done already. */
3070 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3071 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3073 static const char *const name
[] = {
3074 "__aeabi_unwind_cpp_pr0",
3075 "__aeabi_unwind_cpp_pr1",
3076 "__aeabi_unwind_cpp_pr2"
3078 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3079 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3080 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3081 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3082 = marked_pr_dependency
;
3086 /* Inline exception table entry. */
3087 md_number_to_chars (ptr
+ 4, val
, 4);
3089 /* Self relative offset of the table entry. */
3090 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3091 BFD_RELOC_ARM_PREL31
);
3093 /* Restore the original section. */
3094 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3098 /* Parse an unwind_cantunwind directive. */
3101 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3103 demand_empty_rest_of_line ();
3104 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3105 as_bad (_("personality routine specified for cantunwind frame"));
3107 unwind
.personality_index
= -2;
3111 /* Parse a personalityindex directive. */
3114 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3118 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3119 as_bad (_("duplicate .personalityindex directive"));
3123 if (exp
.X_op
!= O_constant
3124 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3126 as_bad (_("bad personality routine number"));
3127 ignore_rest_of_line ();
3131 unwind
.personality_index
= exp
.X_add_number
;
3133 demand_empty_rest_of_line ();
3137 /* Parse a personality directive. */
3140 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3144 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3145 as_bad (_("duplicate .personality directive"));
3147 name
= input_line_pointer
;
3148 c
= get_symbol_end ();
3149 p
= input_line_pointer
;
3150 unwind
.personality_routine
= symbol_find_or_make (name
);
3152 demand_empty_rest_of_line ();
3156 /* Parse a directive saving core registers. */
3159 s_arm_unwind_save_core (void)
3165 range
= parse_reg_list (&input_line_pointer
);
3168 as_bad (_("expected register list"));
3169 ignore_rest_of_line ();
3173 demand_empty_rest_of_line ();
3175 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3176 into .unwind_save {..., sp...}. We aren't bothered about the value of
3177 ip because it is clobbered by calls. */
3178 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3179 && (range
& 0x3000) == 0x1000)
3181 unwind
.opcode_count
--;
3182 unwind
.sp_restored
= 0;
3183 range
= (range
| 0x2000) & ~0x1000;
3184 unwind
.pending_offset
= 0;
3190 /* See if we can use the short opcodes. These pop a block of up to 8
3191 registers starting with r4, plus maybe r14. */
3192 for (n
= 0; n
< 8; n
++)
3194 /* Break at the first non-saved register. */
3195 if ((range
& (1 << (n
+ 4))) == 0)
3198 /* See if there are any other bits set. */
3199 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3201 /* Use the long form. */
3202 op
= 0x8000 | ((range
>> 4) & 0xfff);
3203 add_unwind_opcode (op
, 2);
3207 /* Use the short form. */
3209 op
= 0xa8; /* Pop r14. */
3211 op
= 0xa0; /* Do not pop r14. */
3213 add_unwind_opcode (op
, 1);
3220 op
= 0xb100 | (range
& 0xf);
3221 add_unwind_opcode (op
, 2);
3224 /* Record the number of bytes pushed. */
3225 for (n
= 0; n
< 16; n
++)
3227 if (range
& (1 << n
))
3228 unwind
.frame_size
+= 4;
3233 /* Parse a directive saving FPA registers. */
3236 s_arm_unwind_save_fpa (int reg
)
3242 /* Get Number of registers to transfer. */
3243 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3246 exp
.X_op
= O_illegal
;
3248 if (exp
.X_op
!= O_constant
)
3250 as_bad (_("expected , <constant>"));
3251 ignore_rest_of_line ();
3255 num_regs
= exp
.X_add_number
;
3257 if (num_regs
< 1 || num_regs
> 4)
3259 as_bad (_("number of registers must be in the range [1:4]"));
3260 ignore_rest_of_line ();
3264 demand_empty_rest_of_line ();
3269 op
= 0xb4 | (num_regs
- 1);
3270 add_unwind_opcode (op
, 1);
3275 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3276 add_unwind_opcode (op
, 2);
3278 unwind
.frame_size
+= num_regs
* 12;
3282 /* Parse a directive saving VFP registers for ARMv6 and above. */
3285 s_arm_unwind_save_vfp_armv6 (void)
3290 int num_vfpv3_regs
= 0;
3291 int num_regs_below_16
;
3293 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3296 as_bad (_("expected register list"));
3297 ignore_rest_of_line ();
3301 demand_empty_rest_of_line ();
3303 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3304 than FSTMX/FLDMX-style ones). */
3306 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3308 num_vfpv3_regs
= count
;
3309 else if (start
+ count
> 16)
3310 num_vfpv3_regs
= start
+ count
- 16;
3312 if (num_vfpv3_regs
> 0)
3314 int start_offset
= start
> 16 ? start
- 16 : 0;
3315 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3316 add_unwind_opcode (op
, 2);
3319 /* Generate opcode for registers numbered in the range 0 .. 15. */
3320 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3321 assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3322 if (num_regs_below_16
> 0)
3324 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3325 add_unwind_opcode (op
, 2);
3328 unwind
.frame_size
+= count
* 8;
3332 /* Parse a directive saving VFP registers for pre-ARMv6. */
3335 s_arm_unwind_save_vfp (void)
3341 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3344 as_bad (_("expected register list"));
3345 ignore_rest_of_line ();
3349 demand_empty_rest_of_line ();
3354 op
= 0xb8 | (count
- 1);
3355 add_unwind_opcode (op
, 1);
3360 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3361 add_unwind_opcode (op
, 2);
3363 unwind
.frame_size
+= count
* 8 + 4;
3367 /* Parse a directive saving iWMMXt data registers. */
3370 s_arm_unwind_save_mmxwr (void)
3378 if (*input_line_pointer
== '{')
3379 input_line_pointer
++;
3383 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3387 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3392 as_tsktsk (_("register list not in ascending order"));
3395 if (*input_line_pointer
== '-')
3397 input_line_pointer
++;
3398 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3401 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3404 else if (reg
>= hi_reg
)
3406 as_bad (_("bad register range"));
3409 for (; reg
< hi_reg
; reg
++)
3413 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3415 if (*input_line_pointer
== '}')
3416 input_line_pointer
++;
3418 demand_empty_rest_of_line ();
3420 /* Generate any deferred opcodes because we're going to be looking at
3422 flush_pending_unwind ();
3424 for (i
= 0; i
< 16; i
++)
3426 if (mask
& (1 << i
))
3427 unwind
.frame_size
+= 8;
3430 /* Attempt to combine with a previous opcode. We do this because gcc
3431 likes to output separate unwind directives for a single block of
3433 if (unwind
.opcode_count
> 0)
3435 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3436 if ((i
& 0xf8) == 0xc0)
3439 /* Only merge if the blocks are contiguous. */
3442 if ((mask
& 0xfe00) == (1 << 9))
3444 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3445 unwind
.opcode_count
--;
3448 else if (i
== 6 && unwind
.opcode_count
>= 2)
3450 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3454 op
= 0xffff << (reg
- 1);
3456 && ((mask
& op
) == (1u << (reg
- 1))))
3458 op
= (1 << (reg
+ i
+ 1)) - 1;
3459 op
&= ~((1 << reg
) - 1);
3461 unwind
.opcode_count
-= 2;
3468 /* We want to generate opcodes in the order the registers have been
3469 saved, ie. descending order. */
3470 for (reg
= 15; reg
>= -1; reg
--)
3472 /* Save registers in blocks. */
3474 || !(mask
& (1 << reg
)))
3476 /* We found an unsaved reg. Generate opcodes to save the
3477 preceeding block. */
3483 op
= 0xc0 | (hi_reg
- 10);
3484 add_unwind_opcode (op
, 1);
3489 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3490 add_unwind_opcode (op
, 2);
3499 ignore_rest_of_line ();
3503 s_arm_unwind_save_mmxwcg (void)
3510 if (*input_line_pointer
== '{')
3511 input_line_pointer
++;
3515 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3519 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3525 as_tsktsk (_("register list not in ascending order"));
3528 if (*input_line_pointer
== '-')
3530 input_line_pointer
++;
3531 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3534 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3537 else if (reg
>= hi_reg
)
3539 as_bad (_("bad register range"));
3542 for (; reg
< hi_reg
; reg
++)
3546 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3548 if (*input_line_pointer
== '}')
3549 input_line_pointer
++;
3551 demand_empty_rest_of_line ();
3553 /* Generate any deferred opcodes because we're going to be looking at
3555 flush_pending_unwind ();
3557 for (reg
= 0; reg
< 16; reg
++)
3559 if (mask
& (1 << reg
))
3560 unwind
.frame_size
+= 4;
3563 add_unwind_opcode (op
, 2);
3566 ignore_rest_of_line ();
3570 /* Parse an unwind_save directive.
3571 If the argument is non-zero, this is a .vsave directive. */
3574 s_arm_unwind_save (int arch_v6
)
3577 struct reg_entry
*reg
;
3578 bfd_boolean had_brace
= FALSE
;
3580 /* Figure out what sort of save we have. */
3581 peek
= input_line_pointer
;
3589 reg
= arm_reg_parse_multi (&peek
);
3593 as_bad (_("register expected"));
3594 ignore_rest_of_line ();
3603 as_bad (_("FPA .unwind_save does not take a register list"));
3604 ignore_rest_of_line ();
3607 s_arm_unwind_save_fpa (reg
->number
);
3610 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3613 s_arm_unwind_save_vfp_armv6 ();
3615 s_arm_unwind_save_vfp ();
3617 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3618 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3621 as_bad (_(".unwind_save does not support this kind of register"));
3622 ignore_rest_of_line ();
3627 /* Parse an unwind_movsp directive. */
3630 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3636 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3639 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3640 ignore_rest_of_line ();
3644 /* Optional constant. */
3645 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3647 if (immediate_for_directive (&offset
) == FAIL
)
3653 demand_empty_rest_of_line ();
3655 if (reg
== REG_SP
|| reg
== REG_PC
)
3657 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3661 if (unwind
.fp_reg
!= REG_SP
)
3662 as_bad (_("unexpected .unwind_movsp directive"));
3664 /* Generate opcode to restore the value. */
3666 add_unwind_opcode (op
, 1);
3668 /* Record the information for later. */
3669 unwind
.fp_reg
= reg
;
3670 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3671 unwind
.sp_restored
= 1;
3674 /* Parse an unwind_pad directive. */
3677 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3681 if (immediate_for_directive (&offset
) == FAIL
)
3686 as_bad (_("stack increment must be multiple of 4"));
3687 ignore_rest_of_line ();
3691 /* Don't generate any opcodes, just record the details for later. */
3692 unwind
.frame_size
+= offset
;
3693 unwind
.pending_offset
+= offset
;
3695 demand_empty_rest_of_line ();
3698 /* Parse an unwind_setfp directive. */
3701 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3707 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3708 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3711 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3713 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3715 as_bad (_("expected <reg>, <reg>"));
3716 ignore_rest_of_line ();
3720 /* Optional constant. */
3721 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3723 if (immediate_for_directive (&offset
) == FAIL
)
3729 demand_empty_rest_of_line ();
3731 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
3733 as_bad (_("register must be either sp or set by a previous"
3734 "unwind_movsp directive"));
3738 /* Don't generate any opcodes, just record the information for later. */
3739 unwind
.fp_reg
= fp_reg
;
3742 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3744 unwind
.fp_offset
-= offset
;
3747 /* Parse an unwind_raw directive. */
3750 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3753 /* This is an arbitrary limit. */
3754 unsigned char op
[16];
3758 if (exp
.X_op
== O_constant
3759 && skip_past_comma (&input_line_pointer
) != FAIL
)
3761 unwind
.frame_size
+= exp
.X_add_number
;
3765 exp
.X_op
= O_illegal
;
3767 if (exp
.X_op
!= O_constant
)
3769 as_bad (_("expected <offset>, <opcode>"));
3770 ignore_rest_of_line ();
3776 /* Parse the opcode. */
3781 as_bad (_("unwind opcode too long"));
3782 ignore_rest_of_line ();
3784 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3786 as_bad (_("invalid unwind opcode"));
3787 ignore_rest_of_line ();
3790 op
[count
++] = exp
.X_add_number
;
3792 /* Parse the next byte. */
3793 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3799 /* Add the opcode bytes in reverse order. */
3801 add_unwind_opcode (op
[count
], 1);
3803 demand_empty_rest_of_line ();
3807 /* Parse a .eabi_attribute directive. */
3810 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3813 bfd_boolean is_string
;
3820 if (exp
.X_op
!= O_constant
)
3823 tag
= exp
.X_add_number
;
3824 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3829 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3831 if (tag
== 32 || !is_string
)
3834 if (exp
.X_op
!= O_constant
)
3836 as_bad (_("expected numeric constant"));
3837 ignore_rest_of_line ();
3840 i
= exp
.X_add_number
;
3842 if (tag
== Tag_compatibility
3843 && skip_past_comma (&input_line_pointer
) == FAIL
)
3845 as_bad (_("expected comma"));
3846 ignore_rest_of_line ();
3851 skip_whitespace(input_line_pointer
);
3852 if (*input_line_pointer
!= '"')
3854 input_line_pointer
++;
3855 s
= input_line_pointer
;
3856 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3857 input_line_pointer
++;
3858 if (*input_line_pointer
!= '"')
3860 saved_char
= *input_line_pointer
;
3861 *input_line_pointer
= 0;
3869 if (tag
== Tag_compatibility
)
3870 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3872 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3874 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3878 *input_line_pointer
= saved_char
;
3879 input_line_pointer
++;
3881 demand_empty_rest_of_line ();
3884 as_bad (_("bad string constant"));
3885 ignore_rest_of_line ();
3888 as_bad (_("expected <tag> , <value>"));
3889 ignore_rest_of_line ();
3891 #endif /* OBJ_ELF */
3893 static void s_arm_arch (int);
3894 static void s_arm_object_arch (int);
3895 static void s_arm_cpu (int);
3896 static void s_arm_fpu (int);
3901 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
3908 if (exp
.X_op
== O_symbol
)
3909 exp
.X_op
= O_secrel
;
3911 emit_expr (&exp
, 4);
3913 while (*input_line_pointer
++ == ',');
3915 input_line_pointer
--;
3916 demand_empty_rest_of_line ();
3920 /* This table describes all the machine specific pseudo-ops the assembler
3921 has to support. The fields are:
3922 pseudo-op name without dot
3923 function to call to execute this pseudo-op
3924 Integer arg to pass to the function. */
3926 const pseudo_typeS md_pseudo_table
[] =
3928 /* Never called because '.req' does not start a line. */
3929 { "req", s_req
, 0 },
3930 /* Following two are likewise never called. */
3933 { "unreq", s_unreq
, 0 },
3934 { "bss", s_bss
, 0 },
3935 { "align", s_align
, 0 },
3936 { "arm", s_arm
, 0 },
3937 { "thumb", s_thumb
, 0 },
3938 { "code", s_code
, 0 },
3939 { "force_thumb", s_force_thumb
, 0 },
3940 { "thumb_func", s_thumb_func
, 0 },
3941 { "thumb_set", s_thumb_set
, 0 },
3942 { "even", s_even
, 0 },
3943 { "ltorg", s_ltorg
, 0 },
3944 { "pool", s_ltorg
, 0 },
3945 { "syntax", s_syntax
, 0 },
3946 { "cpu", s_arm_cpu
, 0 },
3947 { "arch", s_arm_arch
, 0 },
3948 { "object_arch", s_arm_object_arch
, 0 },
3949 { "fpu", s_arm_fpu
, 0 },
3951 { "word", s_arm_elf_cons
, 4 },
3952 { "long", s_arm_elf_cons
, 4 },
3953 { "rel31", s_arm_rel31
, 0 },
3954 { "fnstart", s_arm_unwind_fnstart
, 0 },
3955 { "fnend", s_arm_unwind_fnend
, 0 },
3956 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3957 { "personality", s_arm_unwind_personality
, 0 },
3958 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3959 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3960 { "save", s_arm_unwind_save
, 0 },
3961 { "vsave", s_arm_unwind_save
, 1 },
3962 { "movsp", s_arm_unwind_movsp
, 0 },
3963 { "pad", s_arm_unwind_pad
, 0 },
3964 { "setfp", s_arm_unwind_setfp
, 0 },
3965 { "unwind_raw", s_arm_unwind_raw
, 0 },
3966 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3970 /* These are used for dwarf. */
3974 /* These are used for dwarf2. */
3975 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
3976 { "loc", dwarf2_directive_loc
, 0 },
3977 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
3979 { "extend", float_cons
, 'x' },
3980 { "ldouble", float_cons
, 'x' },
3981 { "packed", float_cons
, 'p' },
3983 {"secrel32", pe_directive_secrel
, 0},
3988 /* Parser functions used exclusively in instruction operands. */
3990 /* Generic immediate-value read function for use in insn parsing.
3991 STR points to the beginning of the immediate (the leading #);
3992 VAL receives the value; if the value is outside [MIN, MAX]
3993 issue an error. PREFIX_OPT is true if the immediate prefix is
3997 parse_immediate (char **str
, int *val
, int min
, int max
,
3998 bfd_boolean prefix_opt
)
4001 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4002 if (exp
.X_op
!= O_constant
)
4004 inst
.error
= _("constant expression required");
4008 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4010 inst
.error
= _("immediate value out of range");
4014 *val
= exp
.X_add_number
;
4018 /* Less-generic immediate-value read function with the possibility of loading a
4019 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4020 instructions. Puts the result directly in inst.operands[i]. */
4023 parse_big_immediate (char **str
, int i
)
4028 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4030 if (exp
.X_op
== O_constant
)
4032 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4033 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4034 O_constant. We have to be careful not to break compilation for
4035 32-bit X_add_number, though. */
4036 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4038 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4039 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4040 inst
.operands
[i
].regisimm
= 1;
4043 else if (exp
.X_op
== O_big
4044 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4045 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4047 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4048 /* Bignums have their least significant bits in
4049 generic_bignum[0]. Make sure we put 32 bits in imm and
4050 32 bits in reg, in a (hopefully) portable way. */
4051 assert (parts
!= 0);
4052 inst
.operands
[i
].imm
= 0;
4053 for (j
= 0; j
< parts
; j
++, idx
++)
4054 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4055 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4056 inst
.operands
[i
].reg
= 0;
4057 for (j
= 0; j
< parts
; j
++, idx
++)
4058 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4059 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4060 inst
.operands
[i
].regisimm
= 1;
4070 /* Returns the pseudo-register number of an FPA immediate constant,
4071 or FAIL if there isn't a valid constant here. */
4074 parse_fpa_immediate (char ** str
)
4076 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4082 /* First try and match exact strings, this is to guarantee
4083 that some formats will work even for cross assembly. */
4085 for (i
= 0; fp_const
[i
]; i
++)
4087 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4091 *str
+= strlen (fp_const
[i
]);
4092 if (is_end_of_line
[(unsigned char) **str
])
4098 /* Just because we didn't get a match doesn't mean that the constant
4099 isn't valid, just that it is in a format that we don't
4100 automatically recognize. Try parsing it with the standard
4101 expression routines. */
4103 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4105 /* Look for a raw floating point number. */
4106 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4107 && is_end_of_line
[(unsigned char) *save_in
])
4109 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4111 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4113 if (words
[j
] != fp_values
[i
][j
])
4117 if (j
== MAX_LITTLENUMS
)
4125 /* Try and parse a more complex expression, this will probably fail
4126 unless the code uses a floating point prefix (eg "0f"). */
4127 save_in
= input_line_pointer
;
4128 input_line_pointer
= *str
;
4129 if (expression (&exp
) == absolute_section
4130 && exp
.X_op
== O_big
4131 && exp
.X_add_number
< 0)
4133 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4135 if (gen_to_words (words
, 5, (long) 15) == 0)
4137 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4139 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4141 if (words
[j
] != fp_values
[i
][j
])
4145 if (j
== MAX_LITTLENUMS
)
4147 *str
= input_line_pointer
;
4148 input_line_pointer
= save_in
;
4155 *str
= input_line_pointer
;
4156 input_line_pointer
= save_in
;
4157 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bit 29 selects which exponent pattern the remaining exponent
     bits (25-30) must match.  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  /* The low 19 bits must be clear; the sign bit (31), bit 24 and the
     mantissa top bits (19-23) are unconstrained.  */
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
4171 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4172 0baBbbbbbc defgh000 00000000 00000000.
4173 The minus-zero case needs special handling, since it can't be encoded in the
4174 "quarter-precision" float format, but can nonetheless be loaded as an integer
4178 parse_qfloat_immediate (char **ccp
, int *immed
)
4181 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4183 skip_past_char (&str
, '#');
4185 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4187 unsigned fpword
= 0;
4190 /* Our FP word must be 32 bits (single-precision FP). */
4191 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4193 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4197 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
4210 /* Shift operands. */
4213 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4216 struct asm_shift_name
4219 enum shift_kind kind
;
4222 /* Third argument to parse_shift. */
4223 enum parse_shift_mode
4225 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4226 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4227 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4228 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4229 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4232 /* Parse a <shift> specifier on an ARM data processing instruction.
4233 This has three forms:
4235 (LSL|LSR|ASL|ASR|ROR) Rs
4236 (LSL|LSR|ASL|ASR|ROR) #imm
4239 Note that ASL is assimilated to LSL in the instruction encoding, and
4240 RRX to ROR #0 (which cannot be written as such). */
4243 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4245 const struct asm_shift_name
*shift_name
;
4246 enum shift_kind shift
;
4251 for (p
= *str
; ISALPHA (*p
); p
++)
4256 inst
.error
= _("shift expression expected");
4260 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4262 if (shift_name
== NULL
)
4264 inst
.error
= _("shift expression expected");
4268 shift
= shift_name
->kind
;
4272 case NO_SHIFT_RESTRICT
:
4273 case SHIFT_IMMEDIATE
: break;
4275 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4276 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4278 inst
.error
= _("'LSL' or 'ASR' required");
4283 case SHIFT_LSL_IMMEDIATE
:
4284 if (shift
!= SHIFT_LSL
)
4286 inst
.error
= _("'LSL' required");
4291 case SHIFT_ASR_IMMEDIATE
:
4292 if (shift
!= SHIFT_ASR
)
4294 inst
.error
= _("'ASR' required");
4302 if (shift
!= SHIFT_RRX
)
4304 /* Whitespace can appear here if the next thing is a bare digit. */
4305 skip_whitespace (p
);
4307 if (mode
== NO_SHIFT_RESTRICT
4308 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4310 inst
.operands
[i
].imm
= reg
;
4311 inst
.operands
[i
].immisreg
= 1;
4313 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4316 inst
.operands
[i
].shift_kind
= shift
;
4317 inst
.operands
[i
].shifted
= 1;
4322 /* Parse a <shifter_operand> for an ARM data processing instruction:
4325 #<immediate>, <rotate>
4329 where <shift> is defined by parse_shift above, and <rotate> is a
4330 multiple of 2 between 0 and 30. Validation of immediate operands
4331 is deferred to md_apply_fix. */
4334 parse_shifter_operand (char **str
, int i
)
4339 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4341 inst
.operands
[i
].reg
= value
;
4342 inst
.operands
[i
].isreg
= 1;
4344 /* parse_shift will override this if appropriate */
4345 inst
.reloc
.exp
.X_op
= O_constant
;
4346 inst
.reloc
.exp
.X_add_number
= 0;
4348 if (skip_past_comma (str
) == FAIL
)
4351 /* Shift operation on register. */
4352 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4355 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4358 if (skip_past_comma (str
) == SUCCESS
)
4360 /* #x, y -- ie explicit rotation by Y. */
4361 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4364 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4366 inst
.error
= _("constant expression expected");
4370 value
= expr
.X_add_number
;
4371 if (value
< 0 || value
> 30 || value
% 2 != 0)
4373 inst
.error
= _("invalid rotation");
4376 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4378 inst
.error
= _("invalid constant");
4382 /* Convert to decoded value. md_apply_fix will put it back. */
4383 inst
.reloc
.exp
.X_add_number
4384 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4385 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4388 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4389 inst
.reloc
.pc_rel
= 0;
4393 /* Group relocation information. Each entry in the table contains the
4394 textual name of the relocation as may appear in assembler source
4395 and must end with a colon.
4396 Along with this textual name are the relocation codes to be used if
4397 the corresponding instruction is an ALU instruction (ADD or SUB only),
4398 an LDR, an LDRS, or an LDC. */
4400 struct group_reloc_table_entry
4411 /* Varieties of non-ALU group relocation. */
4418 static struct group_reloc_table_entry group_reloc_table
[] =
4419 { /* Program counter relative: */
4421 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4426 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4427 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4428 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4429 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4431 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4436 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4437 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4438 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4439 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4441 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4442 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4443 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4444 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4445 /* Section base relative */
4447 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4452 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4453 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4454 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4455 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4457 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4462 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4463 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4464 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4465 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4467 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4468 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4469 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4470 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4472 /* Given the address of a pointer pointing to the textual name of a group
4473 relocation as may appear in assembler source, attempt to find its details
4474 in group_reloc_table. The pointer will be updated to the character after
4475 the trailing colon. On failure, FAIL will be returned; SUCCESS
4476 otherwise. On success, *entry will be updated to point at the relevant
4477 group_reloc_table entry. */
4480 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4483 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4485 int length
= strlen (group_reloc_table
[i
].name
);
4487 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0 &&
4488 (*str
)[length
] == ':')
4490 *out
= &group_reloc_table
[i
];
4491 *str
+= (length
+ 1);
4499 /* Parse a <shifter_operand> for an ARM data processing instruction
4500 (as for parse_shifter_operand) where group relocations are allowed:
4503 #<immediate>, <rotate>
4504 #:<group_reloc>:<expression>
4508 where <group_reloc> is one of the strings defined in group_reloc_table.
4509 The hashes are optional.
4511 Everything else is as for parse_shifter_operand. */
4513 static parse_operand_result
4514 parse_shifter_operand_group_reloc (char **str
, int i
)
4516 /* Determine if we have the sequence of characters #: or just :
4517 coming next. If we do, then we check for a group relocation.
4518 If we don't, punt the whole lot to parse_shifter_operand. */
4520 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4521 || (*str
)[0] == ':')
4523 struct group_reloc_table_entry
*entry
;
4525 if ((*str
)[0] == '#')
4530 /* Try to parse a group relocation. Anything else is an error. */
4531 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4533 inst
.error
= _("unknown group relocation");
4534 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4537 /* We now have the group relocation table entry corresponding to
4538 the name in the assembler source. Next, we parse the expression. */
4539 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4540 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4542 /* Record the relocation type (always the ALU variant here). */
4543 inst
.reloc
.type
= entry
->alu_code
;
4544 assert (inst
.reloc
.type
!= 0);
4546 return PARSE_OPERAND_SUCCESS
;
4549 return parse_shifter_operand (str
, i
) == SUCCESS
4550 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4552 /* Never reached. */
4555 /* Parse all forms of an ARM address expression. Information is written
4556 to inst.operands[i] and/or inst.reloc.
4558 Preindexed addressing (.preind=1):
4560 [Rn, #offset] .reg=Rn .reloc.exp=offset
4561 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4562 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4563 .shift_kind=shift .reloc.exp=shift_imm
4565 These three may have a trailing ! which causes .writeback to be set also.
4567 Postindexed addressing (.postind=1, .writeback=1):
4569 [Rn], #offset .reg=Rn .reloc.exp=offset
4570 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4571 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4572 .shift_kind=shift .reloc.exp=shift_imm
4574 Unindexed addressing (.preind=0, .postind=0):
4576 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4580 [Rn]{!} shorthand for [Rn,#0]{!}
4581 =immediate .isreg=0 .reloc.exp=immediate
4582 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4584 It is the caller's responsibility to check for addressing modes not
4585 supported by the instruction, and to set inst.reloc.type. */
4587 static parse_operand_result
4588 parse_address_main (char **str
, int i
, int group_relocations
,
4589 group_reloc_type group_type
)
4594 if (skip_past_char (&p
, '[') == FAIL
)
4596 if (skip_past_char (&p
, '=') == FAIL
)
4598 /* bare address - translate to PC-relative offset */
4599 inst
.reloc
.pc_rel
= 1;
4600 inst
.operands
[i
].reg
= REG_PC
;
4601 inst
.operands
[i
].isreg
= 1;
4602 inst
.operands
[i
].preind
= 1;
4604 /* else a load-constant pseudo op, no special treatment needed here */
4606 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4607 return PARSE_OPERAND_FAIL
;
4610 return PARSE_OPERAND_SUCCESS
;
4613 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4615 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4616 return PARSE_OPERAND_FAIL
;
4618 inst
.operands
[i
].reg
= reg
;
4619 inst
.operands
[i
].isreg
= 1;
4621 if (skip_past_comma (&p
) == SUCCESS
)
4623 inst
.operands
[i
].preind
= 1;
4626 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4628 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4630 inst
.operands
[i
].imm
= reg
;
4631 inst
.operands
[i
].immisreg
= 1;
4633 if (skip_past_comma (&p
) == SUCCESS
)
4634 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4635 return PARSE_OPERAND_FAIL
;
4637 else if (skip_past_char (&p
, ':') == SUCCESS
)
4639 /* FIXME: '@' should be used here, but it's filtered out by generic
4640 code before we get to see it here. This may be subject to
4643 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4644 if (exp
.X_op
!= O_constant
)
4646 inst
.error
= _("alignment must be constant");
4647 return PARSE_OPERAND_FAIL
;
4649 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4650 inst
.operands
[i
].immisalign
= 1;
4651 /* Alignments are not pre-indexes. */
4652 inst
.operands
[i
].preind
= 0;
4656 if (inst
.operands
[i
].negative
)
4658 inst
.operands
[i
].negative
= 0;
4662 if (group_relocations
&&
4663 ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
4666 struct group_reloc_table_entry
*entry
;
4668 /* Skip over the #: or : sequence. */
4674 /* Try to parse a group relocation. Anything else is an
4676 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
4678 inst
.error
= _("unknown group relocation");
4679 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4682 /* We now have the group relocation table entry corresponding to
4683 the name in the assembler source. Next, we parse the
4685 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4686 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4688 /* Record the relocation type. */
4692 inst
.reloc
.type
= entry
->ldr_code
;
4696 inst
.reloc
.type
= entry
->ldrs_code
;
4700 inst
.reloc
.type
= entry
->ldc_code
;
4707 if (inst
.reloc
.type
== 0)
4709 inst
.error
= _("this group relocation is not allowed on this instruction");
4710 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4714 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4715 return PARSE_OPERAND_FAIL
;
4719 if (skip_past_char (&p
, ']') == FAIL
)
4721 inst
.error
= _("']' expected");
4722 return PARSE_OPERAND_FAIL
;
4725 if (skip_past_char (&p
, '!') == SUCCESS
)
4726 inst
.operands
[i
].writeback
= 1;
4728 else if (skip_past_comma (&p
) == SUCCESS
)
4730 if (skip_past_char (&p
, '{') == SUCCESS
)
4732 /* [Rn], {expr} - unindexed, with option */
4733 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4734 0, 255, TRUE
) == FAIL
)
4735 return PARSE_OPERAND_FAIL
;
4737 if (skip_past_char (&p
, '}') == FAIL
)
4739 inst
.error
= _("'}' expected at end of 'option' field");
4740 return PARSE_OPERAND_FAIL
;
4742 if (inst
.operands
[i
].preind
)
4744 inst
.error
= _("cannot combine index with option");
4745 return PARSE_OPERAND_FAIL
;
4748 return PARSE_OPERAND_SUCCESS
;
4752 inst
.operands
[i
].postind
= 1;
4753 inst
.operands
[i
].writeback
= 1;
4755 if (inst
.operands
[i
].preind
)
4757 inst
.error
= _("cannot combine pre- and post-indexing");
4758 return PARSE_OPERAND_FAIL
;
4762 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4764 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4766 /* We might be using the immediate for alignment already. If we
4767 are, OR the register number into the low-order bits. */
4768 if (inst
.operands
[i
].immisalign
)
4769 inst
.operands
[i
].imm
|= reg
;
4771 inst
.operands
[i
].imm
= reg
;
4772 inst
.operands
[i
].immisreg
= 1;
4774 if (skip_past_comma (&p
) == SUCCESS
)
4775 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4776 return PARSE_OPERAND_FAIL
;
4780 if (inst
.operands
[i
].negative
)
4782 inst
.operands
[i
].negative
= 0;
4785 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4786 return PARSE_OPERAND_FAIL
;
4791 /* If at this point neither .preind nor .postind is set, we have a
4792 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4793 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4795 inst
.operands
[i
].preind
= 1;
4796 inst
.reloc
.exp
.X_op
= O_constant
;
4797 inst
.reloc
.exp
.X_add_number
= 0;
4800 return PARSE_OPERAND_SUCCESS
;
4804 parse_address (char **str
, int i
)
4806 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
4810 static parse_operand_result
4811 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
4813 return parse_address_main (str
, i
, 1, type
);
4816 /* Parse an operand for a MOVW or MOVT instruction. */
4818 parse_half (char **str
)
4823 skip_past_char (&p
, '#');
4824 if (strncasecmp (p
, ":lower16:", 9) == 0)
4825 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
4826 else if (strncasecmp (p
, ":upper16:", 9) == 0)
4827 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
4829 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
4835 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4838 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
4840 if (inst
.reloc
.exp
.X_op
!= O_constant
)
4842 inst
.error
= _("constant expression expected");
4845 if (inst
.reloc
.exp
.X_add_number
< 0
4846 || inst
.reloc
.exp
.X_add_number
> 0xffff)
4848 inst
.error
= _("immediate value out of range");
4856 /* Miscellaneous. */
4858 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4859 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4861 parse_psr (char **str
)
4864 unsigned long psr_field
;
4865 const struct asm_psr
*psr
;
4868 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4869 feature for ease of use and backwards compatibility. */
4871 if (strncasecmp (p
, "SPSR", 4) == 0)
4872 psr_field
= SPSR_BIT
;
4873 else if (strncasecmp (p
, "CPSR", 4) == 0)
4880 while (ISALNUM (*p
) || *p
== '_');
4882 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4893 /* A suffix follows. */
4899 while (ISALNUM (*p
) || *p
== '_');
4901 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4905 psr_field
|= psr
->field
;
4910 goto error
; /* Garbage after "[CS]PSR". */
4912 psr_field
|= (PSR_c
| PSR_f
);
4918 inst
.error
= _("flag for {c}psr instruction expected");
4922 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4923 value suitable for splatting into the AIF field of the instruction. */
4926 parse_cps_flags (char **str
)
4935 case '\0': case ',':
4938 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4939 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4940 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4943 inst
.error
= _("unrecognized CPS flag");
4948 if (saw_a_flag
== 0)
4950 inst
.error
= _("missing CPS flags");
4958 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4959 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4962 parse_endian_specifier (char **str
)
4967 if (strncasecmp (s
, "BE", 2))
4969 else if (strncasecmp (s
, "LE", 2))
4973 inst
.error
= _("valid endian specifiers are be or le");
4977 if (ISALNUM (s
[2]) || s
[2] == '_')
4979 inst
.error
= _("valid endian specifiers are be or le");
4984 return little_endian
;
4987 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4988 value suitable for poking into the rotate field of an sxt or sxta
4989 instruction, or FAIL on error. */
4992 parse_ror (char **str
)
4997 if (strncasecmp (s
, "ROR", 3) == 0)
5001 inst
.error
= _("missing rotation field after comma");
5005 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5010 case 0: *str
= s
; return 0x0;
5011 case 8: *str
= s
; return 0x1;
5012 case 16: *str
= s
; return 0x2;
5013 case 24: *str
= s
; return 0x3;
5016 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5021 /* Parse a conditional code (from conds[] below). The value returned is in the
5022 range 0 .. 14, or FAIL. */
5024 parse_cond (char **str
)
5027 const struct asm_cond
*c
;
5030 while (ISALPHA (*q
))
5033 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
5036 inst
.error
= _("condition required");
5044 /* Parse an option for a barrier instruction. Returns the encoding for the
5047 parse_barrier (char **str
)
5050 const struct asm_barrier_opt
*o
;
5053 while (ISALPHA (*q
))
5056 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
5064 /* Parse the operands of a table branch instruction. Similar to a memory
5067 parse_tb (char **str
)
5072 if (skip_past_char (&p
, '[') == FAIL
)
5074 inst
.error
= _("'[' expected");
5078 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5080 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5083 inst
.operands
[0].reg
= reg
;
5085 if (skip_past_comma (&p
) == FAIL
)
5087 inst
.error
= _("',' expected");
5091 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5093 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5096 inst
.operands
[0].imm
= reg
;
5098 if (skip_past_comma (&p
) == SUCCESS
)
5100 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5102 if (inst
.reloc
.exp
.X_add_number
!= 1)
5104 inst
.error
= _("invalid shift");
5107 inst
.operands
[0].shifted
= 1;
5110 if (skip_past_char (&p
, ']') == FAIL
)
5112 inst
.error
= _("']' expected");
5119 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5120 information on the types the operands can take and how they are encoded.
5121 Up to four operands may be read; this function handles setting the
5122 ".present" field for each read operand itself.
5123 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5124 else returns FAIL. */
5127 parse_neon_mov (char **str
, int *which_operand
)
5129 int i
= *which_operand
, val
;
5130 enum arm_reg_type rtype
;
5132 struct neon_type_el optype
;
5134 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5136 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5137 inst
.operands
[i
].reg
= val
;
5138 inst
.operands
[i
].isscalar
= 1;
5139 inst
.operands
[i
].vectype
= optype
;
5140 inst
.operands
[i
++].present
= 1;
5142 if (skip_past_comma (&ptr
) == FAIL
)
5145 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5148 inst
.operands
[i
].reg
= val
;
5149 inst
.operands
[i
].isreg
= 1;
5150 inst
.operands
[i
].present
= 1;
5152 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5155 /* Cases 0, 1, 2, 3, 5 (D only). */
5156 if (skip_past_comma (&ptr
) == FAIL
)
5159 inst
.operands
[i
].reg
= val
;
5160 inst
.operands
[i
].isreg
= 1;
5161 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5162 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5163 inst
.operands
[i
].isvec
= 1;
5164 inst
.operands
[i
].vectype
= optype
;
5165 inst
.operands
[i
++].present
= 1;
5167 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5169 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5170 Case 13: VMOV <Sd>, <Rm> */
5171 inst
.operands
[i
].reg
= val
;
5172 inst
.operands
[i
].isreg
= 1;
5173 inst
.operands
[i
].present
= 1;
5175 if (rtype
== REG_TYPE_NQ
)
5177 first_error (_("can't use Neon quad register here"));
5180 else if (rtype
!= REG_TYPE_VFS
)
5183 if (skip_past_comma (&ptr
) == FAIL
)
5185 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5187 inst
.operands
[i
].reg
= val
;
5188 inst
.operands
[i
].isreg
= 1;
5189 inst
.operands
[i
].present
= 1;
5192 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
5193 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5194 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5195 Case 10: VMOV.F32 <Sd>, #<imm>
5196 Case 11: VMOV.F64 <Dd>, #<imm> */
5198 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
5199 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5200 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5202 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
5205 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5206 Case 1: VMOV<c><q> <Dd>, <Dm>
5207 Case 8: VMOV.F32 <Sd>, <Sm>
5208 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5210 inst
.operands
[i
].reg
= val
;
5211 inst
.operands
[i
].isreg
= 1;
5212 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5213 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5214 inst
.operands
[i
].isvec
= 1;
5215 inst
.operands
[i
].vectype
= optype
;
5216 inst
.operands
[i
].present
= 1;
5218 if (skip_past_comma (&ptr
) == SUCCESS
)
5223 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5226 inst
.operands
[i
].reg
= val
;
5227 inst
.operands
[i
].isreg
= 1;
5228 inst
.operands
[i
++].present
= 1;
5230 if (skip_past_comma (&ptr
) == FAIL
)
5233 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5236 inst
.operands
[i
].reg
= val
;
5237 inst
.operands
[i
].isreg
= 1;
5238 inst
.operands
[i
++].present
= 1;
5243 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5247 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5250 inst
.operands
[i
].reg
= val
;
5251 inst
.operands
[i
].isreg
= 1;
5252 inst
.operands
[i
++].present
= 1;
5254 if (skip_past_comma (&ptr
) == FAIL
)
5257 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5259 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5260 inst
.operands
[i
].reg
= val
;
5261 inst
.operands
[i
].isscalar
= 1;
5262 inst
.operands
[i
].present
= 1;
5263 inst
.operands
[i
].vectype
= optype
;
5265 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5267 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5268 inst
.operands
[i
].reg
= val
;
5269 inst
.operands
[i
].isreg
= 1;
5270 inst
.operands
[i
++].present
= 1;
5272 if (skip_past_comma (&ptr
) == FAIL
)
5275 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
5278 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
5282 inst
.operands
[i
].reg
= val
;
5283 inst
.operands
[i
].isreg
= 1;
5284 inst
.operands
[i
].isvec
= 1;
5285 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5286 inst
.operands
[i
].vectype
= optype
;
5287 inst
.operands
[i
].present
= 1;
5289 if (rtype
== REG_TYPE_VFS
)
5293 if (skip_past_comma (&ptr
) == FAIL
)
5295 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
5298 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
5301 inst
.operands
[i
].reg
= val
;
5302 inst
.operands
[i
].isreg
= 1;
5303 inst
.operands
[i
].isvec
= 1;
5304 inst
.operands
[i
].issingle
= 1;
5305 inst
.operands
[i
].vectype
= optype
;
5306 inst
.operands
[i
].present
= 1;
5309 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
5313 inst
.operands
[i
].reg
= val
;
5314 inst
.operands
[i
].isreg
= 1;
5315 inst
.operands
[i
].isvec
= 1;
5316 inst
.operands
[i
].issingle
= 1;
5317 inst
.operands
[i
].vectype
= optype
;
5318 inst
.operands
[i
++].present
= 1;
5323 first_error (_("parse error"));
5327 /* Successfully parsed the operands. Update args. */
5333 first_error (_("expected comma"));
5337 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Matcher codes for parse_operands.  Enumerator order is significant
   only in that every code from OP_FIRST_OPTIONAL onwards denotes an
   optional operand (the parser may backtrack past it).  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
5461 /* Generic instruction operand parser. This does no encoding and no
5462 semantic validation; it merely squirrels values away in the inst
5463 structure. Returns SUCCESS or FAIL depending on whether the
5464 specified grammar matched. */
5466 parse_operands (char *str
, const unsigned char *pattern
)
5468 unsigned const char *upat
= pattern
;
5469 char *backtrack_pos
= 0;
5470 const char *backtrack_error
= 0;
5471 int i
, val
, backtrack_index
= 0;
5472 enum arm_reg_type rtype
;
5473 parse_operand_result result
;
5475 #define po_char_or_fail(chr) do { \
5476 if (skip_past_char (&str, chr) == FAIL) \
5480 #define po_reg_or_fail(regtype) do { \
5481 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5482 &inst.operands[i].vectype); \
5485 first_error (_(reg_expected_msgs[regtype])); \
5488 inst.operands[i].reg = val; \
5489 inst.operands[i].isreg = 1; \
5490 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5491 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5492 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5493 || rtype == REG_TYPE_VFD \
5494 || rtype == REG_TYPE_NQ); \
5497 #define po_reg_or_goto(regtype, label) do { \
5498 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5499 &inst.operands[i].vectype); \
5503 inst.operands[i].reg = val; \
5504 inst.operands[i].isreg = 1; \
5505 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5506 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5507 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5508 || rtype == REG_TYPE_VFD \
5509 || rtype == REG_TYPE_NQ); \
5512 #define po_imm_or_fail(min, max, popt) do { \
5513 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5515 inst.operands[i].imm = val; \
5518 #define po_scalar_or_goto(elsz, label) do { \
5519 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5522 inst.operands[i].reg = val; \
5523 inst.operands[i].isscalar = 1; \
5526 #define po_misc_or_fail(expr) do { \
5531 #define po_misc_or_fail_no_backtrack(expr) do { \
5533 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5534 backtrack_pos = 0; \
5535 if (result != PARSE_OPERAND_SUCCESS) \
5539 skip_whitespace (str
);
5541 for (i
= 0; upat
[i
] != OP_stop
; i
++)
5543 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
5545 /* Remember where we are in case we need to backtrack. */
5546 assert (!backtrack_pos
);
5547 backtrack_pos
= str
;
5548 backtrack_error
= inst
.error
;
5549 backtrack_index
= i
;
5553 po_char_or_fail (',');
5561 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5562 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5563 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5564 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5565 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5566 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5568 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5569 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
5570 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5571 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5572 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5573 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5574 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5575 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5576 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5577 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5578 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5579 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5581 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5583 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5584 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
5586 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
5588 /* Neon scalar. Using an element size of 8 means that some invalid
5589 scalars are accepted here, so deal with those in later code. */
5590 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5592 /* WARNING: We can expand to two operands here. This has the potential
5593 to totally confuse the backtracking mechanism! It will be OK at
5594 least as long as we don't try to use optional args as well,
5598 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5599 inst
.operands
[i
].present
= 1;
5601 skip_past_comma (&str
);
5602 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5605 /* Optional register operand was omitted. Unfortunately, it's in
5606 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5607 here (this is a bit grotty). */
5608 inst
.operands
[i
] = inst
.operands
[i
-1];
5609 inst
.operands
[i
-1].present
= 0;
5612 /* There's a possibility of getting a 64-bit immediate here, so
5613 we need special handling. */
5614 if (parse_big_immediate (&str
, i
) == FAIL
)
5616 inst
.error
= _("immediate value is out of range");
5624 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5627 po_imm_or_fail (0, 0, TRUE
);
5632 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
5637 po_scalar_or_goto (8, try_rr
);
5640 po_reg_or_fail (REG_TYPE_RN
);
5646 po_scalar_or_goto (8, try_nsdq
);
5649 po_reg_or_fail (REG_TYPE_NSDQ
);
5655 po_scalar_or_goto (8, try_ndq
);
5658 po_reg_or_fail (REG_TYPE_NDQ
);
5664 po_scalar_or_goto (8, try_vfd
);
5667 po_reg_or_fail (REG_TYPE_VFD
);
5672 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5673 not careful then bad things might happen. */
5674 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5679 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5682 /* There's a possibility of getting a 64-bit immediate here, so
5683 we need special handling. */
5684 if (parse_big_immediate (&str
, i
) == FAIL
)
5686 inst
.error
= _("immediate value is out of range");
5694 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5697 po_imm_or_fail (0, 63, TRUE
);
5702 po_char_or_fail ('[');
5703 po_reg_or_fail (REG_TYPE_RN
);
5704 po_char_or_fail (']');
5708 po_reg_or_fail (REG_TYPE_RN
);
5709 if (skip_past_char (&str
, '!') == SUCCESS
)
5710 inst
.operands
[i
].writeback
= 1;
5714 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5715 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5716 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5717 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5718 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5719 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5720 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5721 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5722 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5723 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5724 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5725 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5727 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5729 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5730 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5732 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5733 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5734 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5736 /* Immediate variants */
5738 po_char_or_fail ('{');
5739 po_imm_or_fail (0, 255, TRUE
);
5740 po_char_or_fail ('}');
5744 /* The expression parser chokes on a trailing !, so we have
5745 to find it first and zap it. */
5748 while (*s
&& *s
!= ',')
5753 inst
.operands
[i
].writeback
= 1;
5755 po_imm_or_fail (0, 31, TRUE
);
5763 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5768 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5773 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5775 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5777 val
= parse_reloc (&str
);
5780 inst
.error
= _("unrecognized relocation suffix");
5783 else if (val
!= BFD_RELOC_UNUSED
)
5785 inst
.operands
[i
].imm
= val
;
5786 inst
.operands
[i
].hasreloc
= 1;
5791 /* Operand for MOVW or MOVT. */
5793 po_misc_or_fail (parse_half (&str
));
5796 /* Register or expression */
5797 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5798 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5800 /* Register or immediate */
5801 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5802 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5804 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5806 if (!is_immediate_prefix (*str
))
5809 val
= parse_fpa_immediate (&str
);
5812 /* FPA immediates are encoded as registers 8-15.
5813 parse_fpa_immediate has already applied the offset. */
5814 inst
.operands
[i
].reg
= val
;
5815 inst
.operands
[i
].isreg
= 1;
5818 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
5819 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
5821 /* Two kinds of register */
5824 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5826 || (rege
->type
!= REG_TYPE_MMXWR
5827 && rege
->type
!= REG_TYPE_MMXWC
5828 && rege
->type
!= REG_TYPE_MMXWCG
))
5830 inst
.error
= _("iWMMXt data or control register expected");
5833 inst
.operands
[i
].reg
= rege
->number
;
5834 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5840 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5842 || (rege
->type
!= REG_TYPE_MMXWC
5843 && rege
->type
!= REG_TYPE_MMXWCG
))
5845 inst
.error
= _("iWMMXt control register expected");
5848 inst
.operands
[i
].reg
= rege
->number
;
5849 inst
.operands
[i
].isreg
= 1;
5854 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5855 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5856 case OP_oROR
: val
= parse_ror (&str
); break;
5857 case OP_PSR
: val
= parse_psr (&str
); break;
5858 case OP_COND
: val
= parse_cond (&str
); break;
5859 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5862 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
5863 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
5866 val
= parse_psr (&str
);
5870 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
5873 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5875 if (strncasecmp (str
, "APSR_", 5) == 0)
5882 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
5883 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
5884 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
5885 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
5886 default: found
= 16;
5890 inst
.operands
[i
].isvec
= 1;
5897 po_misc_or_fail (parse_tb (&str
));
5900 /* Register lists */
5902 val
= parse_reg_list (&str
);
5905 inst
.operands
[1].writeback
= 1;
5911 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5915 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5919 /* Allow Q registers too. */
5920 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5925 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5927 inst
.operands
[i
].issingle
= 1;
5932 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5937 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5938 &inst
.operands
[i
].vectype
);
5941 /* Addressing modes */
5943 po_misc_or_fail (parse_address (&str
, i
));
5947 po_misc_or_fail_no_backtrack (
5948 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
5952 po_misc_or_fail_no_backtrack (
5953 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
5957 po_misc_or_fail_no_backtrack (
5958 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
5962 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5966 po_misc_or_fail_no_backtrack (
5967 parse_shifter_operand_group_reloc (&str
, i
));
5971 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
5975 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
5979 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
5983 as_fatal ("unhandled operand code %d", upat
[i
]);
5986 /* Various value-based sanity checks and shared operations. We
5987 do not signal immediate failures for the register constraints;
5988 this allows a syntax error to take precedence. */
5996 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
5997 inst
.error
= BAD_PC
;
6015 inst
.operands
[i
].imm
= val
;
6022 /* If we get here, this operand was successfully parsed. */
6023 inst
.operands
[i
].present
= 1;
6027 inst
.error
= BAD_ARGS
;
6032 /* The parse routine should already have set inst.error, but set a
6033 defaut here just in case. */
6035 inst
.error
= _("syntax error");
6039 /* Do not backtrack over a trailing optional argument that
6040 absorbed some text. We will only fail again, with the
6041 'garbage following instruction' error message, which is
6042 probably less helpful than the current one. */
6043 if (backtrack_index
== i
&& backtrack_pos
!= str
6044 && upat
[i
+1] == OP_stop
)
6047 inst
.error
= _("syntax error");
6051 /* Try again, skipping the optional argument at backtrack_pos. */
6052 str
= backtrack_pos
;
6053 inst
.error
= backtrack_error
;
6054 inst
.operands
[backtrack_index
].present
= 0;
6055 i
= backtrack_index
;
6059 /* Check that we have parsed all the arguments. */
6060 if (*str
!= '\0' && !inst
.error
)
6061 inst
.error
= _("garbage following instruction");
6063 return inst
.error
? FAIL
: SUCCESS
;
6066 #undef po_char_or_fail
6067 #undef po_reg_or_fail
6068 #undef po_reg_or_goto
6069 #undef po_imm_or_fail
6070 #undef po_scalar_or_fail
6072 /* Shorthand macro for instruction encoding functions issuing errors. */
6073 #define constraint(expr, err) do { \
6081 /* Functions for operand encoding. ARM, then Thumb. */
6083 #define rotate_left(v, n) (v << n | v >> (32 - n))
6085 /* If VAL can be encoded in the immediate field of an ARM instruction,
6086 return the encoded form. Otherwise, return FAIL. */
6089 encode_arm_immediate (unsigned int val
)
6093 for (i
= 0; i
< 32; i
+= 2)
6094 if ((a
= rotate_left (val
, i
)) <= 0xff)
6095 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
6100 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6101 return the encoded form. Otherwise, return FAIL. */
6103 encode_thumb32_immediate (unsigned int val
)
6110 for (i
= 1; i
<= 24; i
++)
6113 if ((val
& ~(0xff << i
)) == 0)
6114 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
6118 if (val
== ((a
<< 16) | a
))
6120 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
6124 if (val
== ((a
<< 16) | a
))
6125 return 0x200 | (a
>> 8);
6129 /* Encode a VFP SP or DP register number into inst.instruction. */
6132 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
6134 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
6137 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
6140 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
6143 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
6148 first_error (_("D register out of range for selected VFP version"));
6156 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
6160 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
6164 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
6168 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
6172 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
6176 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
6184 /* Encode a <shift> in an ARM-format instruction. The immediate,
6185 if any, is handled by md_apply_fix. */
6187 encode_arm_shift (int i
)
6189 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
6190 inst
.instruction
|= SHIFT_ROR
<< 5;
6193 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
6194 if (inst
.operands
[i
].immisreg
)
6196 inst
.instruction
|= SHIFT_BY_REG
;
6197 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
6200 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6205 encode_arm_shifter_operand (int i
)
6207 if (inst
.operands
[i
].isreg
)
6209 inst
.instruction
|= inst
.operands
[i
].reg
;
6210 encode_arm_shift (i
);
6213 inst
.instruction
|= INST_IMMEDIATE
;
6216 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6218 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
6220 assert (inst
.operands
[i
].isreg
);
6221 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
6223 if (inst
.operands
[i
].preind
)
6227 inst
.error
= _("instruction does not accept preindexed addressing");
6230 inst
.instruction
|= PRE_INDEX
;
6231 if (inst
.operands
[i
].writeback
)
6232 inst
.instruction
|= WRITE_BACK
;
6235 else if (inst
.operands
[i
].postind
)
6237 assert (inst
.operands
[i
].writeback
);
6239 inst
.instruction
|= WRITE_BACK
;
6241 else /* unindexed - only for coprocessor */
6243 inst
.error
= _("instruction does not accept unindexed addressing");
6247 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
6248 && (((inst
.instruction
& 0x000f0000) >> 16)
6249 == ((inst
.instruction
& 0x0000f000) >> 12)))
6250 as_warn ((inst
.instruction
& LOAD_BIT
)
6251 ? _("destination register same as write-back base")
6252 : _("source register same as write-back base"));
6255 /* inst.operands[i] was set up by parse_address. Encode it into an
6256 ARM-format mode 2 load or store instruction. If is_t is true,
6257 reject forms that cannot be used with a T instruction (i.e. not
6260 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
6262 encode_arm_addr_mode_common (i
, is_t
);
6264 if (inst
.operands
[i
].immisreg
)
6266 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
6267 inst
.instruction
|= inst
.operands
[i
].imm
;
6268 if (!inst
.operands
[i
].negative
)
6269 inst
.instruction
|= INDEX_UP
;
6270 if (inst
.operands
[i
].shifted
)
6272 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
6273 inst
.instruction
|= SHIFT_ROR
<< 5;
6276 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
6277 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6281 else /* immediate offset in inst.reloc */
6283 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6284 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
6288 /* inst.operands[i] was set up by parse_address. Encode it into an
6289 ARM-format mode 3 load or store instruction. Reject forms that
6290 cannot be used with such instructions. If is_t is true, reject
6291 forms that cannot be used with a T instruction (i.e. not
6294 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
6296 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
6298 inst
.error
= _("instruction does not accept scaled register index");
6302 encode_arm_addr_mode_common (i
, is_t
);
6304 if (inst
.operands
[i
].immisreg
)
6306 inst
.instruction
|= inst
.operands
[i
].imm
;
6307 if (!inst
.operands
[i
].negative
)
6308 inst
.instruction
|= INDEX_UP
;
6310 else /* immediate offset in inst.reloc */
6312 inst
.instruction
|= HWOFFSET_IMM
;
6313 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6314 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
6318 /* inst.operands[i] was set up by parse_address. Encode it into an
6319 ARM-format instruction. Reject all forms which cannot be encoded
6320 into a coprocessor load/store instruction. If wb_ok is false,
6321 reject use of writeback; if unind_ok is false, reject use of
6322 unindexed addressing. If reloc_override is not 0, use it instead
6323 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6324 (in which case it is preserved). */
6327 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
6329 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
6331 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
6333 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
6335 assert (!inst
.operands
[i
].writeback
);
6338 inst
.error
= _("instruction does not support unindexed addressing");
6341 inst
.instruction
|= inst
.operands
[i
].imm
;
6342 inst
.instruction
|= INDEX_UP
;
6346 if (inst
.operands
[i
].preind
)
6347 inst
.instruction
|= PRE_INDEX
;
6349 if (inst
.operands
[i
].writeback
)
6351 if (inst
.operands
[i
].reg
== REG_PC
)
6353 inst
.error
= _("pc may not be used with write-back");
6358 inst
.error
= _("instruction does not support writeback");
6361 inst
.instruction
|= WRITE_BACK
;
6365 inst
.reloc
.type
= reloc_override
;
6366 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
6367 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
6368 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
6371 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
6373 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
6379 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6380 Determine whether it can be performed with a move instruction; if
6381 it can, convert inst.instruction to that move instruction and
6382 return 1; if it can't, convert inst.instruction to a literal-pool
6383 load and return 0. If this is not a valid thing to do in the
6384 current context, set inst.error and return 1.
6386 inst.operands[i] describes the destination register. */
6389 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
6394 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
6398 if ((inst
.instruction
& tbit
) == 0)
6400 inst
.error
= _("invalid pseudo operation");
6403 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
6405 inst
.error
= _("constant expression expected");
6408 if (inst
.reloc
.exp
.X_op
== O_constant
)
6412 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
6414 /* This can be done with a mov(1) instruction. */
6415 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
6416 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
6422 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
6425 /* This can be done with a mov instruction. */
6426 inst
.instruction
&= LITERAL_MASK
;
6427 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
6428 inst
.instruction
|= value
& 0xfff;
6432 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
6435 /* This can be done with a mvn instruction. */
6436 inst
.instruction
&= LITERAL_MASK
;
6437 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
6438 inst
.instruction
|= value
& 0xfff;
6444 if (add_to_lit_pool () == FAIL
)
6446 inst
.error
= _("literal pool insertion failed");
6449 inst
.operands
[1].reg
= REG_PC
;
6450 inst
.operands
[1].isreg
= 1;
6451 inst
.operands
[1].preind
= 1;
6452 inst
.reloc
.pc_rel
= 1;
6453 inst
.reloc
.type
= (thumb_p
6454 ? BFD_RELOC_ARM_THUMB_OFFSET
6456 ? BFD_RELOC_ARM_HWLITERAL
6457 : BFD_RELOC_ARM_LITERAL
));
6461 /* Functions for instruction encoding, sorted by subarchitecture.
6462 First some generics; their names are taken from the conventional
6463 bit positions for register arguments in ARM format instructions. */
6473 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6479 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6480 inst
.instruction
|= inst
.operands
[1].reg
;
6486 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6487 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6493 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6494 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6500 unsigned Rn
= inst
.operands
[2].reg
;
6501 /* Enforce restrictions on SWP instruction. */
6502 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
6503 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
6504 _("Rn must not overlap other operands"));
6505 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6506 inst
.instruction
|= inst
.operands
[1].reg
;
6507 inst
.instruction
|= Rn
<< 16;
6513 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6514 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6515 inst
.instruction
|= inst
.operands
[2].reg
;
6521 inst
.instruction
|= inst
.operands
[0].reg
;
6522 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6523 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6529 inst
.instruction
|= inst
.operands
[0].imm
;
6535 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6536 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
6539 /* ARM instructions, in alphabetical order by function name (except
6540 that wrapper functions appear immediately after the function they
6543 /* This is a pseudo-op of the form "adr rd, label" to be converted
6544 into a relative address of the form "add rd, pc, #label-.-8". */
6549 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6551 /* Frag hacking will turn this into a sub instruction if the offset turns
6552 out to be negative. */
6553 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6554 inst
.reloc
.pc_rel
= 1;
6555 inst
.reloc
.exp
.X_add_number
-= 8;
6558 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6559 into a relative address of the form:
6560 add rd, pc, #low(label-.-8)"
6561 add rd, rd, #high(label-.-8)" */
6566 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6568 /* Frag hacking will turn this into a sub instruction if the offset turns
6569 out to be negative. */
6570 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
6571 inst
.reloc
.pc_rel
= 1;
6572 inst
.size
= INSN_SIZE
* 2;
6573 inst
.reloc
.exp
.X_add_number
-= 8;
6579 if (!inst
.operands
[1].present
)
6580 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
6581 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6582 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6583 encode_arm_shifter_operand (2);
6589 if (inst
.operands
[0].present
)
6591 constraint ((inst
.instruction
& 0xf0) != 0x40
6592 && inst
.operands
[0].imm
!= 0xf,
6593 "bad barrier type");
6594 inst
.instruction
|= inst
.operands
[0].imm
;
6597 inst
.instruction
|= 0xf;
6603 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
6604 constraint (msb
> 32, _("bit-field extends past end of register"));
6605 /* The instruction encoding stores the LSB and MSB,
6606 not the LSB and width. */
6607 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6608 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
6609 inst
.instruction
|= (msb
- 1) << 16;
6617 /* #0 in second position is alternative syntax for bfc, which is
6618 the same instruction but with REG_PC in the Rm field. */
6619 if (!inst
.operands
[1].isreg
)
6620 inst
.operands
[1].reg
= REG_PC
;
6622 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
6623 constraint (msb
> 32, _("bit-field extends past end of register"));
6624 /* The instruction encoding stores the LSB and MSB,
6625 not the LSB and width. */
6626 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6627 inst
.instruction
|= inst
.operands
[1].reg
;
6628 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6629 inst
.instruction
|= (msb
- 1) << 16;
6635 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
6636 _("bit-field extends past end of register"));
6637 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6638 inst
.instruction
|= inst
.operands
[1].reg
;
6639 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6640 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6643 /* ARM V5 breakpoint instruction (argument parse)
6644 BKPT <16 bit unsigned immediate>
6645 Instruction is not conditional.
6646 The bit pattern given in insns[] has the COND_ALWAYS condition,
6647 and it is an error if the caller tried to override that. */
6652 /* Top 12 of 16 bits to bits 19:8. */
6653 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6655 /* Bottom 4 of 16 bits to bits 3:0. */
6656 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6660 encode_branch (int default_reloc
)
6662 if (inst
.operands
[0].hasreloc
)
6664 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6665 _("the only suffix valid here is '(plt)'"));
6666 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6670 inst
.reloc
.type
= default_reloc
;
6672 inst
.reloc
.pc_rel
= 1;
6679 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6680 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6683 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6690 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6692 if (inst
.cond
== COND_ALWAYS
)
6693 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6695 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6699 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6702 /* ARM V5 branch-link-exchange instruction (argument parse)
6703 BLX <target_addr> ie BLX(1)
6704 BLX{<condition>} <Rm> ie BLX(2)
6705 Unfortunately, there are two different opcodes for this mnemonic.
6706 So, the insns[].value is not used, and the code here zaps values
6707 into inst.instruction.
6708 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6713 if (inst
.operands
[0].isreg
)
6715 /* Arg is a register; the opcode provided by insns[] is correct.
6716 It is not illegal to do "blx pc", just useless. */
6717 if (inst
.operands
[0].reg
== REG_PC
)
6718 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6720 inst
.instruction
|= inst
.operands
[0].reg
;
6724 /* Arg is an address; this instruction cannot be executed
6725 conditionally, and the opcode must be adjusted. */
6726 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6727 inst
.instruction
= 0xfa000000;
6729 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6730 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6733 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6740 if (inst
.operands
[0].reg
== REG_PC
)
6741 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6743 inst
.instruction
|= inst
.operands
[0].reg
;
6747 /* ARM v5TEJ. Jump to Jazelle code. */
6752 if (inst
.operands
[0].reg
== REG_PC
)
6753 as_tsktsk (_("use of r15 in bxj is not really useful"));
6755 inst
.instruction
|= inst
.operands
[0].reg
;
6758 /* Co-processor data operation:
6759 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6760 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6764 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6765 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6766 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6767 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6768 inst
.instruction
|= inst
.operands
[4].reg
;
6769 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6775 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6776 encode_arm_shifter_operand (1);
6779 /* Transfer between coprocessor and ARM registers.
6780 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6785 No special properties. */
6790 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6791 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6792 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6793 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6794 inst
.instruction
|= inst
.operands
[4].reg
;
6795 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6798 /* Transfer between coprocessor register and pair of ARM registers.
6799 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6804 Two XScale instructions are special cases of these:
6806 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6807 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6809 Result unpredicatable if Rd or Rn is R15. */
6814 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6815 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6816 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6817 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6818 inst
.instruction
|= inst
.operands
[4].reg
;
6824 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6825 inst
.instruction
|= inst
.operands
[1].imm
;
6831 inst
.instruction
|= inst
.operands
[0].imm
;
6837 /* There is no IT instruction in ARM mode. We
6838 process it but do not generate code for it. */
6845 int base_reg
= inst
.operands
[0].reg
;
6846 int range
= inst
.operands
[1].imm
;
6848 inst
.instruction
|= base_reg
<< 16;
6849 inst
.instruction
|= range
;
6851 if (inst
.operands
[1].writeback
)
6852 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6854 if (inst
.operands
[0].writeback
)
6856 inst
.instruction
|= WRITE_BACK
;
6857 /* Check for unpredictable uses of writeback. */
6858 if (inst
.instruction
& LOAD_BIT
)
6860 /* Not allowed in LDM type 2. */
6861 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6862 && ((range
& (1 << REG_PC
)) == 0))
6863 as_warn (_("writeback of base register is UNPREDICTABLE"));
6864 /* Only allowed if base reg not in list for other types. */
6865 else if (range
& (1 << base_reg
))
6866 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6870 /* Not allowed for type 2. */
6871 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6872 as_warn (_("writeback of base register is UNPREDICTABLE"));
6873 /* Only allowed if base reg not in list, or first in list. */
6874 else if ((range
& (1 << base_reg
))
6875 && (range
& ((1 << base_reg
) - 1)))
6876 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6881 /* ARMv5TE load-consecutive (argument parse)
6890 constraint (inst
.operands
[0].reg
% 2 != 0,
6891 _("first destination register must be even"));
6892 constraint (inst
.operands
[1].present
6893 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6894 _("can only load two consecutive registers"));
6895 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6896 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6898 if (!inst
.operands
[1].present
)
6899 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6901 if (inst
.instruction
& LOAD_BIT
)
6903 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6904 register and the first register written; we have to diagnose
6905 overlap between the base and the second register written here. */
6907 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6908 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6909 as_warn (_("base register written back, and overlaps "
6910 "second destination register"));
6912 /* For an index-register load, the index register must not overlap the
6913 destination (even if not write-back). */
6914 else if (inst
.operands
[2].immisreg
6915 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6916 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6917 as_warn (_("index register overlaps destination register"));
6920 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6921 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6927 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6928 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6929 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6930 || inst
.operands
[1].negative
6931 /* This can arise if the programmer has written
6933 or if they have mistakenly used a register name as the last
6936 It is very difficult to distinguish between these two cases
6937 because "rX" might actually be a label. ie the register
6938 name has been occluded by a symbol of the same name. So we
6939 just generate a general 'bad addressing mode' type error
6940 message and leave it up to the programmer to discover the
6941 true cause and fix their mistake. */
6942 || (inst
.operands
[1].reg
== REG_PC
),
6945 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6946 || inst
.reloc
.exp
.X_add_number
!= 0,
6947 _("offset must be zero in ARM encoding"));
6949 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6950 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6951 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6957 constraint (inst
.operands
[0].reg
% 2 != 0,
6958 _("even register required"));
6959 constraint (inst
.operands
[1].present
6960 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6961 _("can only load two consecutive registers"));
6962 /* If op 1 were present and equal to PC, this function wouldn't
6963 have been called in the first place. */
6964 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6967 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6973 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6974 if (!inst
.operands
[1].isreg
)
6975 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6977 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6983 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6985 if (inst
.operands
[1].preind
)
6987 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6988 inst
.reloc
.exp
.X_add_number
!= 0,
6989 _("this instruction requires a post-indexed address"));
6991 inst
.operands
[1].preind
= 0;
6992 inst
.operands
[1].postind
= 1;
6993 inst
.operands
[1].writeback
= 1;
6995 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6996 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
6999 /* Halfword and signed-byte load/store operations. */
7004 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7005 if (!inst
.operands
[1].isreg
)
7006 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7008 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7014 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7016 if (inst
.operands
[1].preind
)
7018 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
7019 inst
.reloc
.exp
.X_add_number
!= 0,
7020 _("this instruction requires a post-indexed address"));
7022 inst
.operands
[1].preind
= 0;
7023 inst
.operands
[1].postind
= 1;
7024 inst
.operands
[1].writeback
= 1;
7026 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7027 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7030 /* Co-processor register load/store.
7031 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7035 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7036 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7037 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7043 /* This restriction does not apply to mls (nor to mla in v6, but
7044 that's hard to detect at present). */
7045 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7046 && !(inst
.instruction
& 0x00400000))
7047 as_tsktsk (_("rd and rm should be different in mla"));
7049 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7050 inst
.instruction
|= inst
.operands
[1].reg
;
7051 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7052 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7059 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7060 encode_arm_shifter_operand (1);
7063 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7070 top
= (inst
.instruction
& 0x00400000) != 0;
7071 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
7072 _(":lower16: not allowed this instruction"));
7073 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
7074 _(":upper16: not allowed instruction"));
7075 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7076 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7078 imm
= inst
.reloc
.exp
.X_add_number
;
7079 /* The value is in two pieces: 0:11, 16:19. */
7080 inst
.instruction
|= (imm
& 0x00000fff);
7081 inst
.instruction
|= (imm
& 0x0000f000) << 4;
7085 static void do_vfp_nsyn_opcode (const char *);
7088 do_vfp_nsyn_mrs (void)
7090 if (inst
.operands
[0].isvec
)
7092 if (inst
.operands
[1].reg
!= 1)
7093 first_error (_("operand 1 must be FPSCR"));
7094 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7095 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7096 do_vfp_nsyn_opcode ("fmstat");
7098 else if (inst
.operands
[1].isvec
)
7099 do_vfp_nsyn_opcode ("fmrx");
7107 do_vfp_nsyn_msr (void)
7109 if (inst
.operands
[0].isvec
)
7110 do_vfp_nsyn_opcode ("fmxr");
7120 if (do_vfp_nsyn_mrs () == SUCCESS
)
7123 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7124 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7126 _("'CPSR' or 'SPSR' expected"));
7127 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7128 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7131 /* Two possible forms:
7132 "{C|S}PSR_<field>, Rm",
7133 "{C|S}PSR_f, #expression". */
7138 if (do_vfp_nsyn_msr () == SUCCESS
)
7141 inst
.instruction
|= inst
.operands
[0].imm
;
7142 if (inst
.operands
[1].isreg
)
7143 inst
.instruction
|= inst
.operands
[1].reg
;
7146 inst
.instruction
|= INST_IMMEDIATE
;
7147 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7148 inst
.reloc
.pc_rel
= 0;
7155 if (!inst
.operands
[2].present
)
7156 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7157 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7158 inst
.instruction
|= inst
.operands
[1].reg
;
7159 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7161 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7162 as_tsktsk (_("rd and rm should be different in mul"));
7165 /* Long Multiply Parser
7166 UMULL RdLo, RdHi, Rm, Rs
7167 SMULL RdLo, RdHi, Rm, Rs
7168 UMLAL RdLo, RdHi, Rm, Rs
7169 SMLAL RdLo, RdHi, Rm, Rs. */
7174 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7175 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7176 inst
.instruction
|= inst
.operands
[2].reg
;
7177 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7179 /* rdhi, rdlo and rm must all be different. */
7180 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7181 || inst
.operands
[0].reg
== inst
.operands
[2].reg
7182 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7183 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7189 if (inst
.operands
[0].present
)
7191 /* Architectural NOP hints are CPSR sets with no bits selected. */
7192 inst
.instruction
&= 0xf0000000;
7193 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
7197 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7198 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7199 Condition defaults to COND_ALWAYS.
7200 Error if Rd, Rn or Rm are R15. */
7205 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7206 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7207 inst
.instruction
|= inst
.operands
[2].reg
;
7208 if (inst
.operands
[3].present
)
7209 encode_arm_shift (3);
7212 /* ARM V6 PKHTB (Argument Parse). */
7217 if (!inst
.operands
[3].present
)
7219 /* If the shift specifier is omitted, turn the instruction
7220 into pkhbt rd, rm, rn. */
7221 inst
.instruction
&= 0xfff00010;
7222 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7223 inst
.instruction
|= inst
.operands
[1].reg
;
7224 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7228 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7229 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7230 inst
.instruction
|= inst
.operands
[2].reg
;
7231 encode_arm_shift (3);
7235 /* ARMv5TE: Preload-Cache
7239 Syntactically, like LDR with B=1, W=0, L=1. */
7244 constraint (!inst
.operands
[0].isreg
,
7245 _("'[' expected after PLD mnemonic"));
7246 constraint (inst
.operands
[0].postind
,
7247 _("post-indexed expression used in preload instruction"));
7248 constraint (inst
.operands
[0].writeback
,
7249 _("writeback used in preload instruction"));
7250 constraint (!inst
.operands
[0].preind
,
7251 _("unindexed addressing used in preload instruction"));
7252 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7255 /* ARMv7: PLI <addr_mode> */
7259 constraint (!inst
.operands
[0].isreg
,
7260 _("'[' expected after PLI mnemonic"));
7261 constraint (inst
.operands
[0].postind
,
7262 _("post-indexed expression used in preload instruction"));
7263 constraint (inst
.operands
[0].writeback
,
7264 _("writeback used in preload instruction"));
7265 constraint (!inst
.operands
[0].preind
,
7266 _("unindexed addressing used in preload instruction"));
7267 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7268 inst
.instruction
&= ~PRE_INDEX
;
7274 inst
.operands
[1] = inst
.operands
[0];
7275 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7276 inst
.operands
[0].isreg
= 1;
7277 inst
.operands
[0].writeback
= 1;
7278 inst
.operands
[0].reg
= REG_SP
;
7282 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7283 word at the specified address and the following word
7285 Unconditionally executed.
7286 Error if Rn is R15. */
7291 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7292 if (inst
.operands
[0].writeback
)
7293 inst
.instruction
|= WRITE_BACK
;
7296 /* ARM V6 ssat (argument parse). */
7301 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7302 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7303 inst
.instruction
|= inst
.operands
[2].reg
;
7305 if (inst
.operands
[3].present
)
7306 encode_arm_shift (3);
7309 /* ARM V6 usat (argument parse). */
7314 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7315 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7316 inst
.instruction
|= inst
.operands
[2].reg
;
7318 if (inst
.operands
[3].present
)
7319 encode_arm_shift (3);
7322 /* ARM V6 ssat16 (argument parse). */
7327 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7328 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7329 inst
.instruction
|= inst
.operands
[2].reg
;
7335 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7336 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7337 inst
.instruction
|= inst
.operands
[2].reg
;
7340 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7341 preserving the other bits.
7343 setend <endian_specifier>, where <endian_specifier> is either
7349 if (inst
.operands
[0].imm
)
7350 inst
.instruction
|= 0x200;
7356 unsigned int Rm
= (inst
.operands
[1].present
7357 ? inst
.operands
[1].reg
7358 : inst
.operands
[0].reg
);
7360 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7361 inst
.instruction
|= Rm
;
7362 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7364 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7365 inst
.instruction
|= SHIFT_BY_REG
;
7368 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7374 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7375 inst
.reloc
.pc_rel
= 0;
7381 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7382 inst
.reloc
.pc_rel
= 0;
7385 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7386 SMLAxy{cond} Rd,Rm,Rs,Rn
7387 SMLAWy{cond} Rd,Rm,Rs,Rn
7388 Error if any register is R15. */
7393 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7394 inst
.instruction
|= inst
.operands
[1].reg
;
7395 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7396 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7399 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7400 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7401 Error if any register is R15.
7402 Warning if Rdlo == Rdhi. */
7407 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7408 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7409 inst
.instruction
|= inst
.operands
[2].reg
;
7410 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7412 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7413 as_tsktsk (_("rdhi and rdlo must be different"));
7416 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7417 SMULxy{cond} Rd,Rm,Rs
7418 Error if any register is R15. */
7423 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7424 inst
.instruction
|= inst
.operands
[1].reg
;
7425 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7428 /* ARM V6 srs (argument parse). */
7433 inst
.instruction
|= inst
.operands
[0].imm
;
7434 if (inst
.operands
[0].writeback
)
7435 inst
.instruction
|= WRITE_BACK
;
7438 /* ARM V6 strex (argument parse). */
7443 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7444 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7445 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7446 || inst
.operands
[2].negative
7447 /* See comment in do_ldrex(). */
7448 || (inst
.operands
[2].reg
== REG_PC
),
7451 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7452 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7454 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7455 || inst
.reloc
.exp
.X_add_number
!= 0,
7456 _("offset must be zero in ARM encoding"));
7458 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7459 inst
.instruction
|= inst
.operands
[1].reg
;
7460 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7461 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7467 constraint (inst
.operands
[1].reg
% 2 != 0,
7468 _("even register required"));
7469 constraint (inst
.operands
[2].present
7470 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
7471 _("can only store two consecutive registers"));
7472 /* If op 2 were present and equal to PC, this function wouldn't
7473 have been called in the first place. */
7474 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
7476 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7477 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
7478 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
7481 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7482 inst
.instruction
|= inst
.operands
[1].reg
;
7483 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7486 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7487 extends it to 32-bits, and adds the result to a value in another
7488 register. You can specify a rotation by 0, 8, 16, or 24 bits
7489 before extracting the 16-bit value.
7490 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7491 Condition defaults to COND_ALWAYS.
7492 Error if any register uses R15. */
7497 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7498 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7499 inst
.instruction
|= inst
.operands
[2].reg
;
7500 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
7505 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7506 Condition defaults to COND_ALWAYS.
7507 Error if any register uses R15. */
7512 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7513 inst
.instruction
|= inst
.operands
[1].reg
;
7514 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
7517 /* VFP instructions. In a logical order: SP variant first, monad
7518 before dyad, arithmetic then move then load/store. */
7521 do_vfp_sp_monadic (void)
7523 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7524 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7528 do_vfp_sp_dyadic (void)
7530 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7531 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7532 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7536 do_vfp_sp_compare_z (void)
7538 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7542 do_vfp_dp_sp_cvt (void)
7544 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7545 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7549 do_vfp_sp_dp_cvt (void)
7551 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7552 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7556 do_vfp_reg_from_sp (void)
7558 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7559 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7563 do_vfp_reg2_from_sp2 (void)
7565 constraint (inst
.operands
[2].imm
!= 2,
7566 _("only two consecutive VFP SP registers allowed here"));
7567 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7568 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7569 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7573 do_vfp_sp_from_reg (void)
7575 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
7576 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7580 do_vfp_sp2_from_reg2 (void)
7582 constraint (inst
.operands
[0].imm
!= 2,
7583 _("only two consecutive VFP SP registers allowed here"));
7584 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
7585 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7586 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7590 do_vfp_sp_ldst (void)
7592 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7593 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7597 do_vfp_dp_ldst (void)
7599 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7600 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7605 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
7607 if (inst
.operands
[0].writeback
)
7608 inst
.instruction
|= WRITE_BACK
;
7610 constraint (ldstm_type
!= VFP_LDSTMIA
,
7611 _("this addressing mode requires base-register writeback"));
7612 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7613 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
7614 inst
.instruction
|= inst
.operands
[1].imm
;
7618 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
7622 if (inst
.operands
[0].writeback
)
7623 inst
.instruction
|= WRITE_BACK
;
7625 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
7626 _("this addressing mode requires base-register writeback"));
7628 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7629 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7631 count
= inst
.operands
[1].imm
<< 1;
7632 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
7635 inst
.instruction
|= count
;
7639 do_vfp_sp_ldstmia (void)
7641 vfp_sp_ldstm (VFP_LDSTMIA
);
7645 do_vfp_sp_ldstmdb (void)
7647 vfp_sp_ldstm (VFP_LDSTMDB
);
7651 do_vfp_dp_ldstmia (void)
7653 vfp_dp_ldstm (VFP_LDSTMIA
);
7657 do_vfp_dp_ldstmdb (void)
7659 vfp_dp_ldstm (VFP_LDSTMDB
);
7663 do_vfp_xp_ldstmia (void)
7665 vfp_dp_ldstm (VFP_LDSTMIAX
);
7669 do_vfp_xp_ldstmdb (void)
7671 vfp_dp_ldstm (VFP_LDSTMDBX
);
7675 do_vfp_dp_rd_rm (void)
7677 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7678 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7682 do_vfp_dp_rn_rd (void)
7684 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
7685 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7689 do_vfp_dp_rd_rn (void)
7691 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7692 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7696 do_vfp_dp_rd_rn_rm (void)
7698 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7699 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7700 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7706 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7710 do_vfp_dp_rm_rd_rn (void)
7712 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7713 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7714 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7717 /* VFPv3 instructions. */
7719 do_vfp_sp_const (void)
7721 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7722 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7723 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7727 do_vfp_dp_const (void)
7729 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7730 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7731 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7735 vfp_conv (int srcsize
)
7737 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7738 inst
.instruction
|= (immbits
& 1) << 5;
7739 inst
.instruction
|= (immbits
>> 1);
7743 do_vfp_sp_conv_16 (void)
7745 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7750 do_vfp_dp_conv_16 (void)
7752 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7757 do_vfp_sp_conv_32 (void)
7759 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7764 do_vfp_dp_conv_32 (void)
7766 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7771 /* FPA instructions. Also in a logical order. */
7776 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7777 inst
.instruction
|= inst
.operands
[1].reg
;
7781 do_fpa_ldmstm (void)
7783 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7784 switch (inst
.operands
[1].imm
)
7786 case 1: inst
.instruction
|= CP_T_X
; break;
7787 case 2: inst
.instruction
|= CP_T_Y
; break;
7788 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7793 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7795 /* The instruction specified "ea" or "fd", so we can only accept
7796 [Rn]{!}. The instruction does not really support stacking or
7797 unstacking, so we have to emulate these by setting appropriate
7798 bits and offsets. */
7799 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7800 || inst
.reloc
.exp
.X_add_number
!= 0,
7801 _("this instruction does not support indexing"));
7803 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7804 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7806 if (!(inst
.instruction
& INDEX_UP
))
7807 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7809 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7811 inst
.operands
[2].preind
= 0;
7812 inst
.operands
[2].postind
= 1;
7816 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7820 /* iWMMXt instructions: strictly in alphabetical order. */
7823 do_iwmmxt_tandorc (void)
7825 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7829 do_iwmmxt_textrc (void)
7831 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7832 inst
.instruction
|= inst
.operands
[1].imm
;
7836 do_iwmmxt_textrm (void)
7838 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7839 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7840 inst
.instruction
|= inst
.operands
[2].imm
;
7844 do_iwmmxt_tinsr (void)
7846 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7847 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7848 inst
.instruction
|= inst
.operands
[2].imm
;
7852 do_iwmmxt_tmia (void)
7854 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7855 inst
.instruction
|= inst
.operands
[1].reg
;
7856 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7860 do_iwmmxt_waligni (void)
7862 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7863 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7864 inst
.instruction
|= inst
.operands
[2].reg
;
7865 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7869 do_iwmmxt_wmerge (void)
7871 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7872 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7873 inst
.instruction
|= inst
.operands
[2].reg
;
7874 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
7878 do_iwmmxt_wmov (void)
7880 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7881 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7882 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7883 inst
.instruction
|= inst
.operands
[1].reg
;
7887 do_iwmmxt_wldstbh (void)
7890 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7892 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7894 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7895 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7899 do_iwmmxt_wldstw (void)
7901 /* RIWR_RIWC clears .isreg for a control register. */
7902 if (!inst
.operands
[0].isreg
)
7904 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7905 inst
.instruction
|= 0xf0000000;
7908 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7909 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7913 do_iwmmxt_wldstd (void)
7915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7916 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
7917 && inst
.operands
[1].immisreg
)
7919 inst
.instruction
&= ~0x1a000ff;
7920 inst
.instruction
|= (0xf << 28);
7921 if (inst
.operands
[1].preind
)
7922 inst
.instruction
|= PRE_INDEX
;
7923 if (!inst
.operands
[1].negative
)
7924 inst
.instruction
|= INDEX_UP
;
7925 if (inst
.operands
[1].writeback
)
7926 inst
.instruction
|= WRITE_BACK
;
7927 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7928 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7929 inst
.instruction
|= inst
.operands
[1].imm
;
7932 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7936 do_iwmmxt_wshufh (void)
7938 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7939 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7940 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7941 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7945 do_iwmmxt_wzero (void)
7947 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7948 inst
.instruction
|= inst
.operands
[0].reg
;
7949 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7950 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7954 do_iwmmxt_wrwrwr_or_imm5 (void)
7956 if (inst
.operands
[2].isreg
)
7959 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
7960 _("immediate operand requires iWMMXt2"));
7962 if (inst
.operands
[2].imm
== 0)
7964 switch ((inst
.instruction
>> 20) & 0xf)
7970 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
7971 inst
.operands
[2].imm
= 16;
7972 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
7978 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
7979 inst
.operands
[2].imm
= 32;
7980 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
7987 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
7989 wrn
= (inst
.instruction
>> 16) & 0xf;
7990 inst
.instruction
&= 0xff0fff0f;
7991 inst
.instruction
|= wrn
;
7992 /* Bail out here; the instruction is now assembled. */
7997 /* Map 32 -> 0, etc. */
7998 inst
.operands
[2].imm
&= 0x1f;
7999 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8003 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8004 operations first, then control, shift, and load/store. */
8006 /* Insns like "foo X,Y,Z". */
8009 do_mav_triple (void)
8011 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8012 inst
.instruction
|= inst
.operands
[1].reg
;
8013 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8016 /* Insns like "foo W,X,Y,Z".
8017 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8022 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8023 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8024 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8025 inst
.instruction
|= inst
.operands
[3].reg
;
8028 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8032 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8035 /* Maverick shift immediate instructions.
8036 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8037 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8042 int imm
= inst
.operands
[2].imm
;
8044 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8045 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8047 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8048 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8049 Bit 4 should be 0. */
8050 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
8052 inst
.instruction
|= imm
;
8055 /* XScale instructions. Also sorted arithmetic before move. */
8057 /* Xscale multiply-accumulate (argument parse)
8060 MIAxycc acc0,Rm,Rs. */
8065 inst
.instruction
|= inst
.operands
[1].reg
;
8066 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8069 /* Xscale move-accumulator-register (argument parse)
8071 MARcc acc0,RdLo,RdHi. */
8076 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8077 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8080 /* Xscale move-register-accumulator (argument parse)
8082 MRAcc RdLo,RdHi,acc0. */
8087 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8088 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8089 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8092 /* Encoding functions relevant only to Thumb. */
8094 /* inst.operands[i] is a shifted-register operand; encode
8095 it into inst.instruction in the format used by Thumb32. */
8098 encode_thumb32_shifted_operand (int i
)
8100 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
8101 unsigned int shift
= inst
.operands
[i
].shift_kind
;
8103 constraint (inst
.operands
[i
].immisreg
,
8104 _("shift by register not allowed in thumb mode"));
8105 inst
.instruction
|= inst
.operands
[i
].reg
;
8106 if (shift
== SHIFT_RRX
)
8107 inst
.instruction
|= SHIFT_ROR
<< 4;
8110 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8111 _("expression too complex"));
8113 constraint (value
> 32
8114 || (value
== 32 && (shift
== SHIFT_LSL
8115 || shift
== SHIFT_ROR
)),
8116 _("shift expression is too large"));
8120 else if (value
== 32)
8123 inst
.instruction
|= shift
<< 4;
8124 inst
.instruction
|= (value
& 0x1c) << 10;
8125 inst
.instruction
|= (value
& 0x03) << 6;
8130 /* inst.operands[i] was set up by parse_address. Encode it into a
8131 Thumb32 format load or store instruction. Reject forms that cannot
8132 be used with such instructions. If is_t is true, reject forms that
8133 cannot be used with a T instruction; if is_d is true, reject forms
8134 that cannot be used with a D instruction. */
8137 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8139 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8141 constraint (!inst
.operands
[i
].isreg
,
8142 _("Instruction does not support =N addresses"));
8144 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8145 if (inst
.operands
[i
].immisreg
)
8147 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
8148 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8149 constraint (inst
.operands
[i
].negative
,
8150 _("Thumb does not support negative register indexing"));
8151 constraint (inst
.operands
[i
].postind
,
8152 _("Thumb does not support register post-indexing"));
8153 constraint (inst
.operands
[i
].writeback
,
8154 _("Thumb does not support register indexing with writeback"));
8155 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8156 _("Thumb supports only LSL in shifted register indexing"));
8158 inst
.instruction
|= inst
.operands
[i
].imm
;
8159 if (inst
.operands
[i
].shifted
)
8161 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8162 _("expression too complex"));
8163 constraint (inst
.reloc
.exp
.X_add_number
< 0
8164 || inst
.reloc
.exp
.X_add_number
> 3,
8165 _("shift out of range"));
8166 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8168 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8170 else if (inst
.operands
[i
].preind
)
8172 constraint (is_pc
&& inst
.operands
[i
].writeback
,
8173 _("cannot use writeback with PC-relative addressing"));
8174 constraint (is_t
&& inst
.operands
[i
].writeback
,
8175 _("cannot use writeback with this instruction"));
8179 inst
.instruction
|= 0x01000000;
8180 if (inst
.operands
[i
].writeback
)
8181 inst
.instruction
|= 0x00200000;
8185 inst
.instruction
|= 0x00000c00;
8186 if (inst
.operands
[i
].writeback
)
8187 inst
.instruction
|= 0x00000100;
8189 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8191 else if (inst
.operands
[i
].postind
)
8193 assert (inst
.operands
[i
].writeback
);
8194 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8195 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8198 inst
.instruction
|= 0x00200000;
8200 inst
.instruction
|= 0x00000900;
8201 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8203 else /* unindexed - only for coprocessor */
8204 inst
.error
= _("instruction does not accept unindexed addressing");
8207 /* Table of Thumb instructions which exist in both 16- and 32-bit
8208 encodings (the latter only in post-V6T2 cores). The index is the
8209 value used in the insns table below. When there is more than one
8210 possible 16-bit encoding for the instruction, this table always
8212 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* SEV.W is F3AF 8004; was mistyped f3af9004.  */
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode for each T_MNEM_* code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcode for each T_MNEM_* code.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
8311 /* Thumb instruction encoders, in alphabetical order. */
8315 do_t_add_sub_w (void)
8319 Rd
= inst
.operands
[0].reg
;
8320 Rn
= inst
.operands
[1].reg
;
8322 constraint (Rd
== 15, _("PC not allowed as destination"));
8323 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
8324 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8327 /* Parse an add or subtract instruction. We get here with inst.instruction
8328 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8335 Rd
= inst
.operands
[0].reg
;
8336 Rs
= (inst
.operands
[1].present
8337 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8338 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8346 flags
= (inst
.instruction
== T_MNEM_adds
8347 || inst
.instruction
== T_MNEM_subs
);
8349 narrow
= (current_it_mask
== 0);
8351 narrow
= (current_it_mask
!= 0);
8352 if (!inst
.operands
[2].isreg
)
8356 add
= (inst
.instruction
== T_MNEM_add
8357 || inst
.instruction
== T_MNEM_adds
);
8359 if (inst
.size_req
!= 4)
8361 /* Attempt to use a narrow opcode, with relaxation if
8363 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
8364 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
8365 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
8366 opcode
= T_MNEM_add_sp
;
8367 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
8368 opcode
= T_MNEM_add_pc
;
8369 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
8372 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
8374 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
8378 inst
.instruction
= THUMB_OP16(opcode
);
8379 inst
.instruction
|= (Rd
<< 4) | Rs
;
8380 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8381 if (inst
.size_req
!= 2)
8382 inst
.relax
= opcode
;
8385 constraint (inst
.size_req
== 2, BAD_HIREG
);
8387 if (inst
.size_req
== 4
8388 || (inst
.size_req
!= 2 && !opcode
))
8392 /* Always use addw/subw. */
8393 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
8394 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8398 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8399 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
8402 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8404 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
8406 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8407 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8412 Rn
= inst
.operands
[2].reg
;
8413 /* See if we can do this with a 16-bit instruction. */
8414 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
8416 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8421 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
8422 || inst
.instruction
== T_MNEM_add
)
8425 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8429 if (inst
.instruction
== T_MNEM_add
)
8433 inst
.instruction
= T_OPCODE_ADD_HI
;
8434 inst
.instruction
|= (Rd
& 8) << 4;
8435 inst
.instruction
|= (Rd
& 7);
8436 inst
.instruction
|= Rn
<< 3;
8439 /* ... because addition is commutative! */
8442 inst
.instruction
= T_OPCODE_ADD_HI
;
8443 inst
.instruction
|= (Rd
& 8) << 4;
8444 inst
.instruction
|= (Rd
& 7);
8445 inst
.instruction
|= Rs
<< 3;
8450 /* If we get here, it can't be done in 16 bits. */
8451 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
8452 _("shift must be constant"));
8453 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8454 inst
.instruction
|= Rd
<< 8;
8455 inst
.instruction
|= Rs
<< 16;
8456 encode_thumb32_shifted_operand (2);
8461 constraint (inst
.instruction
== T_MNEM_adds
8462 || inst
.instruction
== T_MNEM_subs
,
8465 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
8467 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
8468 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
8471 inst
.instruction
= (inst
.instruction
== T_MNEM_add
8473 inst
.instruction
|= (Rd
<< 4) | Rs
;
8474 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8478 Rn
= inst
.operands
[2].reg
;
8479 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
8481 /* We now have Rd, Rs, and Rn set to registers. */
8482 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8484 /* Can't do this for SUB. */
8485 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
8486 inst
.instruction
= T_OPCODE_ADD_HI
;
8487 inst
.instruction
|= (Rd
& 8) << 4;
8488 inst
.instruction
|= (Rd
& 7);
8490 inst
.instruction
|= Rn
<< 3;
8492 inst
.instruction
|= Rs
<< 3;
8494 constraint (1, _("dest must overlap one source register"));
8498 inst
.instruction
= (inst
.instruction
== T_MNEM_add
8499 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
8500 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8508 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
8510 /* Defer to section relaxation. */
8511 inst
.relax
= inst
.instruction
;
8512 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8513 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8515 else if (unified_syntax
&& inst
.size_req
!= 2)
8517 /* Generate a 32-bit opcode. */
8518 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8519 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8520 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
8521 inst
.reloc
.pc_rel
= 1;
8525 /* Generate a 16-bit opcode. */
8526 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8527 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8528 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
8529 inst
.reloc
.pc_rel
= 1;
8531 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8535 /* Arithmetic instructions for which there is just one 16-bit
8536 instruction encoding, and it allows only two low registers.
8537 For maximal compatibility with ARM syntax, we allow three register
8538 operands even when Thumb-32 instructions are not available, as long
8539 as the first two are identical. For instance, both "sbc r0,r1" and
8540 "sbc r0,r0,r1" are allowed. */
8546 Rd
= inst
.operands
[0].reg
;
8547 Rs
= (inst
.operands
[1].present
8548 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8549 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8550 Rn
= inst
.operands
[2].reg
;
8554 if (!inst
.operands
[2].isreg
)
8556 /* For an immediate, we always generate a 32-bit opcode;
8557 section relaxation will shrink it later if possible. */
8558 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8559 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8560 inst
.instruction
|= Rd
<< 8;
8561 inst
.instruction
|= Rs
<< 16;
8562 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8568 /* See if we can do this with a 16-bit instruction. */
8569 if (THUMB_SETS_FLAGS (inst
.instruction
))
8570 narrow
= current_it_mask
== 0;
8572 narrow
= current_it_mask
!= 0;
8574 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8576 if (inst
.operands
[2].shifted
)
8578 if (inst
.size_req
== 4)
8584 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8585 inst
.instruction
|= Rd
;
8586 inst
.instruction
|= Rn
<< 3;
8590 /* If we get here, it can't be done in 16 bits. */
8591 constraint (inst
.operands
[2].shifted
8592 && inst
.operands
[2].immisreg
,
8593 _("shift must be constant"));
8594 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8595 inst
.instruction
|= Rd
<< 8;
8596 inst
.instruction
|= Rs
<< 16;
8597 encode_thumb32_shifted_operand (2);
8602 /* On its face this is a lie - the instruction does set the
8603 flags. However, the only supported mnemonic in this mode
8605 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8607 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8608 _("unshifted register required"));
8609 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8610 constraint (Rd
!= Rs
,
8611 _("dest and source1 must be the same register"));
8613 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8614 inst
.instruction
|= Rd
;
8615 inst
.instruction
|= Rn
<< 3;
8619 /* Similarly, but for instructions where the arithmetic operation is
8620 commutative, so we can allow either of them to be different from
8621 the destination operand in a 16-bit instruction. For instance, all
8622 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8629 Rd
= inst
.operands
[0].reg
;
8630 Rs
= (inst
.operands
[1].present
8631 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8632 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8633 Rn
= inst
.operands
[2].reg
;
8637 if (!inst
.operands
[2].isreg
)
8639 /* For an immediate, we always generate a 32-bit opcode;
8640 section relaxation will shrink it later if possible. */
8641 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8642 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8643 inst
.instruction
|= Rd
<< 8;
8644 inst
.instruction
|= Rs
<< 16;
8645 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8651 /* See if we can do this with a 16-bit instruction. */
8652 if (THUMB_SETS_FLAGS (inst
.instruction
))
8653 narrow
= current_it_mask
== 0;
8655 narrow
= current_it_mask
!= 0;
8657 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8659 if (inst
.operands
[2].shifted
)
8661 if (inst
.size_req
== 4)
8668 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8669 inst
.instruction
|= Rd
;
8670 inst
.instruction
|= Rn
<< 3;
8675 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8676 inst
.instruction
|= Rd
;
8677 inst
.instruction
|= Rs
<< 3;
8682 /* If we get here, it can't be done in 16 bits. */
8683 constraint (inst
.operands
[2].shifted
8684 && inst
.operands
[2].immisreg
,
8685 _("shift must be constant"));
8686 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8687 inst
.instruction
|= Rd
<< 8;
8688 inst
.instruction
|= Rs
<< 16;
8689 encode_thumb32_shifted_operand (2);
8694 /* On its face this is a lie - the instruction does set the
8695 flags. However, the only supported mnemonic in this mode
8697 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8699 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8700 _("unshifted register required"));
8701 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8703 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8704 inst
.instruction
|= Rd
;
8707 inst
.instruction
|= Rn
<< 3;
8709 inst
.instruction
|= Rs
<< 3;
8711 constraint (1, _("dest must overlap one source register"));
8718 if (inst
.operands
[0].present
)
8720 constraint ((inst
.instruction
& 0xf0) != 0x40
8721 && inst
.operands
[0].imm
!= 0xf,
8722 "bad barrier type");
8723 inst
.instruction
|= inst
.operands
[0].imm
;
8726 inst
.instruction
|= 0xf;
8732 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8733 constraint (msb
> 32, _("bit-field extends past end of register"));
8734 /* The instruction encoding stores the LSB and MSB,
8735 not the LSB and width. */
8736 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8737 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
8738 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
8739 inst
.instruction
|= msb
- 1;
8747 /* #0 in second position is alternative syntax for bfc, which is
8748 the same instruction but with REG_PC in the Rm field. */
8749 if (!inst
.operands
[1].isreg
)
8750 inst
.operands
[1].reg
= REG_PC
;
8752 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8753 constraint (msb
> 32, _("bit-field extends past end of register"));
8754 /* The instruction encoding stores the LSB and MSB,
8755 not the LSB and width. */
8756 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8757 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8758 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8759 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8760 inst
.instruction
|= msb
- 1;
8766 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8767 _("bit-field extends past end of register"));
8768 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8769 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8770 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8771 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8772 inst
.instruction
|= inst
.operands
[3].imm
- 1;
8775 /* ARM V5 Thumb BLX (argument parse)
8776 BLX <target_addr> which is BLX(1)
8777 BLX <Rm> which is BLX(2)
8778 Unfortunately, there are two different opcodes for this mnemonic.
8779 So, the insns[].value is not used, and the code here zaps values
8780 into inst.instruction.
8782 ??? How to take advantage of the additional two bits of displacement
8783 available in Thumb32 mode? Need new relocation? */
8788 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8789 if (inst
.operands
[0].isreg
)
8790 /* We have a register, so this is BLX(2). */
8791 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8794 /* No register. This must be BLX(1). */
8795 inst
.instruction
= 0xf000e800;
8797 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8798 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8801 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
8802 inst
.reloc
.pc_rel
= 1;
8812 if (current_it_mask
)
8814 /* Conditional branches inside IT blocks are encoded as unconditional
8817 /* A branch must be the last instruction in an IT block. */
8818 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
8823 if (cond
!= COND_ALWAYS
)
8824 opcode
= T_MNEM_bcond
;
8826 opcode
= inst
.instruction
;
8828 if (unified_syntax
&& inst
.size_req
== 4)
8830 inst
.instruction
= THUMB_OP32(opcode
);
8831 if (cond
== COND_ALWAYS
)
8832 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
8835 assert (cond
!= 0xF);
8836 inst
.instruction
|= cond
<< 22;
8837 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
8842 inst
.instruction
= THUMB_OP16(opcode
);
8843 if (cond
== COND_ALWAYS
)
8844 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
8847 inst
.instruction
|= cond
<< 8;
8848 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
8850 /* Allow section relaxation. */
8851 if (unified_syntax
&& inst
.size_req
!= 2)
8852 inst
.relax
= opcode
;
8855 inst
.reloc
.pc_rel
= 1;
8861 constraint (inst
.cond
!= COND_ALWAYS
,
8862 _("instruction is always unconditional"));
8863 if (inst
.operands
[0].present
)
8865 constraint (inst
.operands
[0].imm
> 255,
8866 _("immediate value out of range"));
8867 inst
.instruction
|= inst
.operands
[0].imm
;
8872 do_t_branch23 (void)
8874 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8875 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8876 inst
.reloc
.pc_rel
= 1;
8878 /* If the destination of the branch is a defined symbol which does not have
8879 the THUMB_FUNC attribute, then we must be calling a function which has
8880 the (interfacearm) attribute. We look for the Thumb entry point to that
8881 function and change the branch to refer to that function instead. */
8882 if ( inst
.reloc
.exp
.X_op
== O_symbol
8883 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8884 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8885 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8886 inst
.reloc
.exp
.X_add_symbol
=
8887 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
8893 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8894 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8895 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8896 should cause the alignment to be checked once it is known. This is
8897 because BX PC only works if the instruction is word aligned. */
8903 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8904 if (inst
.operands
[0].reg
== REG_PC
)
8905 as_tsktsk (_("use of r15 in bxj is not really useful"));
8907 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8913 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8914 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8915 inst
.instruction
|= inst
.operands
[1].reg
;
8921 constraint (current_it_mask
, BAD_NOT_IT
);
8922 inst
.instruction
|= inst
.operands
[0].imm
;
8928 constraint (current_it_mask
, BAD_NOT_IT
);
8930 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8931 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8933 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8934 inst
.instruction
= 0xf3af8000;
8935 inst
.instruction
|= imod
<< 9;
8936 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8937 if (inst
.operands
[1].present
)
8938 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8942 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8943 && (inst
.operands
[0].imm
& 4),
8944 _("selected processor does not support 'A' form "
8945 "of this instruction"));
8946 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
8947 _("Thumb does not support the 2-argument "
8948 "form of this instruction"));
8949 inst
.instruction
|= inst
.operands
[0].imm
;
8953 /* THUMB CPY instruction (argument parse). */
8958 if (inst
.size_req
== 4)
8960 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
8961 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8962 inst
.instruction
|= inst
.operands
[1].reg
;
8966 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8967 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8968 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8975 constraint (current_it_mask
, BAD_NOT_IT
);
8976 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8977 inst
.instruction
|= inst
.operands
[0].reg
;
8978 inst
.reloc
.pc_rel
= 1;
8979 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
8985 inst
.instruction
|= inst
.operands
[0].imm
;
8991 if (!inst
.operands
[1].present
)
8992 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8993 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8994 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8995 inst
.instruction
|= inst
.operands
[2].reg
;
9001 if (unified_syntax
&& inst
.size_req
== 4)
9002 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9004 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9010 unsigned int cond
= inst
.operands
[0].imm
;
9012 constraint (current_it_mask
, BAD_NOT_IT
);
9013 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
9016 /* If the condition is a negative condition, invert the mask. */
9017 if ((cond
& 0x1) == 0x0)
9019 unsigned int mask
= inst
.instruction
& 0x000f;
9021 if ((mask
& 0x7) == 0)
9022 /* no conversion needed */;
9023 else if ((mask
& 0x3) == 0)
9025 else if ((mask
& 0x1) == 0)
9030 inst
.instruction
&= 0xfff0;
9031 inst
.instruction
|= mask
;
9034 inst
.instruction
|= cond
<< 4;
9040 /* This really doesn't seem worth it. */
9041 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9042 _("expression too complex"));
9043 constraint (inst
.operands
[1].writeback
,
9044 _("Thumb load/store multiple does not support {reglist}^"));
9048 /* See if we can use a 16-bit instruction. */
9049 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9050 && inst
.size_req
!= 4
9051 && inst
.operands
[0].reg
<= 7
9052 && !(inst
.operands
[1].imm
& ~0xff)
9053 && (inst
.instruction
== T_MNEM_stmia
9054 ? inst
.operands
[0].writeback
9055 : (inst
.operands
[0].writeback
9056 == !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))))
9058 if (inst
.instruction
== T_MNEM_stmia
9059 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9060 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9061 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9062 inst
.operands
[0].reg
);
9064 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9065 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9066 inst
.instruction
|= inst
.operands
[1].imm
;
9070 if (inst
.operands
[1].imm
& (1 << 13))
9071 as_warn (_("SP should not be in register list"));
9072 if (inst
.instruction
== T_MNEM_stmia
)
9074 if (inst
.operands
[1].imm
& (1 << 15))
9075 as_warn (_("PC should not be in register list"));
9076 if (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9077 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9078 inst
.operands
[0].reg
);
9082 if (inst
.operands
[1].imm
& (1 << 14)
9083 && inst
.operands
[1].imm
& (1 << 15))
9084 as_warn (_("LR and PC should not both be in register list"));
9085 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9086 && inst
.operands
[0].writeback
)
9087 as_warn (_("base register should not be in register list "
9088 "when written back"));
9090 if (inst
.instruction
< 0xffff)
9091 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9092 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9093 inst
.instruction
|= inst
.operands
[1].imm
;
9094 if (inst
.operands
[0].writeback
)
9095 inst
.instruction
|= WRITE_BACK
;
9100 constraint (inst
.operands
[0].reg
> 7
9101 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9102 if (inst
.instruction
== T_MNEM_stmia
)
9104 if (!inst
.operands
[0].writeback
)
9105 as_warn (_("this instruction will write back the base register"));
9106 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9107 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9108 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9109 inst
.operands
[0].reg
);
9113 if (!inst
.operands
[0].writeback
9114 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9115 as_warn (_("this instruction will write back the base register"));
9116 else if (inst
.operands
[0].writeback
9117 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9118 as_warn (_("this instruction will not write back the base register"));
9121 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9122 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9123 inst
.instruction
|= inst
.operands
[1].imm
;
9130 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9131 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9132 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9133 || inst
.operands
[1].negative
,
9136 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9137 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9138 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9144 if (!inst
.operands
[1].present
)
9146 constraint (inst
.operands
[0].reg
== REG_LR
,
9147 _("r14 not allowed as first register "
9148 "when second register is omitted"));
9149 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9151 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9154 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9155 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9156 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9162 unsigned long opcode
;
9165 opcode
= inst
.instruction
;
9168 if (!inst
.operands
[1].isreg
)
9170 if (opcode
<= 0xffff)
9171 inst
.instruction
= THUMB_OP32 (opcode
);
9172 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9175 if (inst
.operands
[1].isreg
9176 && !inst
.operands
[1].writeback
9177 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
9178 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
9180 && inst
.size_req
!= 4)
9182 /* Insn may have a 16-bit form. */
9183 Rn
= inst
.operands
[1].reg
;
9184 if (inst
.operands
[1].immisreg
)
9186 inst
.instruction
= THUMB_OP16 (opcode
);
9188 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
9191 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
9192 && opcode
!= T_MNEM_ldrsb
)
9193 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
9194 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
9201 if (inst
.reloc
.pc_rel
)
9202 opcode
= T_MNEM_ldr_pc2
;
9204 opcode
= T_MNEM_ldr_pc
;
9208 if (opcode
== T_MNEM_ldr
)
9209 opcode
= T_MNEM_ldr_sp
;
9211 opcode
= T_MNEM_str_sp
;
9213 inst
.instruction
= inst
.operands
[0].reg
<< 8;
9217 inst
.instruction
= inst
.operands
[0].reg
;
9218 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9220 inst
.instruction
|= THUMB_OP16 (opcode
);
9221 if (inst
.size_req
== 2)
9222 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9224 inst
.relax
= opcode
;
9228 /* Definitely a 32-bit variant. */
9229 inst
.instruction
= THUMB_OP32 (opcode
);
9230 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9231 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9235 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9237 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
9239 /* Only [Rn,Rm] is acceptable. */
9240 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
9241 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
9242 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
9243 || inst
.operands
[1].negative
,
9244 _("Thumb does not support this addressing mode"));
9245 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9249 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9250 if (!inst
.operands
[1].isreg
)
9251 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9254 constraint (!inst
.operands
[1].preind
9255 || inst
.operands
[1].shifted
9256 || inst
.operands
[1].writeback
,
9257 _("Thumb does not support this addressing mode"));
9258 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
9260 constraint (inst
.instruction
& 0x0600,
9261 _("byte or halfword not valid for base register"));
9262 constraint (inst
.operands
[1].reg
== REG_PC
9263 && !(inst
.instruction
& THUMB_LOAD_BIT
),
9264 _("r15 based store not allowed"));
9265 constraint (inst
.operands
[1].immisreg
,
9266 _("invalid base register for register offset"));
9268 if (inst
.operands
[1].reg
== REG_PC
)
9269 inst
.instruction
= T_OPCODE_LDR_PC
;
9270 else if (inst
.instruction
& THUMB_LOAD_BIT
)
9271 inst
.instruction
= T_OPCODE_LDR_SP
;
9273 inst
.instruction
= T_OPCODE_STR_SP
;
9275 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9276 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9280 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
9281 if (!inst
.operands
[1].immisreg
)
9283 /* Immediate offset. */
9284 inst
.instruction
|= inst
.operands
[0].reg
;
9285 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9286 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9290 /* Register offset. */
9291 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
9292 constraint (inst
.operands
[1].negative
,
9293 _("Thumb does not support this addressing mode"));
9296 switch (inst
.instruction
)
9298 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
9299 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
9300 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
9301 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
9302 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
9303 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
9304 case 0x5600 /* ldrsb */:
9305 case 0x5e00 /* ldrsh */: break;
9309 inst
.instruction
|= inst
.operands
[0].reg
;
9310 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9311 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
9317 if (!inst
.operands
[1].present
)
9319 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9320 constraint (inst
.operands
[0].reg
== REG_LR
,
9321 _("r14 not allowed here"));
9323 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9324 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9325 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
9332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9333 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
9339 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9340 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9341 inst
.instruction
|= inst
.operands
[2].reg
;
9342 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9348 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9349 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9350 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9351 inst
.instruction
|= inst
.operands
[3].reg
;
9359 int r0off
= (inst
.instruction
== T_MNEM_mov
9360 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
9361 unsigned long opcode
;
9363 bfd_boolean low_regs
;
9365 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
9366 opcode
= inst
.instruction
;
9367 if (current_it_mask
)
9368 narrow
= opcode
!= T_MNEM_movs
;
9370 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
9371 if (inst
.size_req
== 4
9372 || inst
.operands
[1].shifted
)
9375 if (!inst
.operands
[1].isreg
)
9377 /* Immediate operand. */
9378 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
9380 if (low_regs
&& narrow
)
9382 inst
.instruction
= THUMB_OP16 (opcode
);
9383 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9384 if (inst
.size_req
== 2)
9385 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9387 inst
.relax
= opcode
;
9391 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9392 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9393 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9394 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9399 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9400 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9401 encode_thumb32_shifted_operand (1);
9404 switch (inst
.instruction
)
9407 inst
.instruction
= T_OPCODE_MOV_HR
;
9408 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9409 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9410 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9414 /* We know we have low registers at this point.
9415 Generate ADD Rd, Rs, #0. */
9416 inst
.instruction
= T_OPCODE_ADD_I3
;
9417 inst
.instruction
|= inst
.operands
[0].reg
;
9418 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9424 inst
.instruction
= T_OPCODE_CMP_LR
;
9425 inst
.instruction
|= inst
.operands
[0].reg
;
9426 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9430 inst
.instruction
= T_OPCODE_CMP_HR
;
9431 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9432 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9433 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9440 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9441 if (inst
.operands
[1].isreg
)
9443 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
9445 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9446 since a MOV instruction produces unpredictable results. */
9447 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9448 inst
.instruction
= T_OPCODE_ADD_I3
;
9450 inst
.instruction
= T_OPCODE_CMP_LR
;
9452 inst
.instruction
|= inst
.operands
[0].reg
;
9453 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9457 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9458 inst
.instruction
= T_OPCODE_MOV_HR
;
9460 inst
.instruction
= T_OPCODE_CMP_HR
;
9466 constraint (inst
.operands
[0].reg
> 7,
9467 _("only lo regs allowed with immediate"));
9468 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9469 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9479 top
= (inst
.instruction
& 0x00800000) != 0;
9480 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
9482 constraint (top
, _(":lower16: not allowed this instruction"));
9483 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
9485 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
9487 constraint (!top
, _(":upper16: not allowed this instruction"));
9488 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
9491 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9492 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9494 imm
= inst
.reloc
.exp
.X_add_number
;
9495 inst
.instruction
|= (imm
& 0xf000) << 4;
9496 inst
.instruction
|= (imm
& 0x0800) << 15;
9497 inst
.instruction
|= (imm
& 0x0700) << 4;
9498 inst
.instruction
|= (imm
& 0x00ff);
9507 int r0off
= (inst
.instruction
== T_MNEM_mvn
9508 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
9511 if (inst
.size_req
== 4
9512 || inst
.instruction
> 0xffff
9513 || inst
.operands
[1].shifted
9514 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9516 else if (inst
.instruction
== T_MNEM_cmn
)
9518 else if (THUMB_SETS_FLAGS (inst
.instruction
))
9519 narrow
= (current_it_mask
== 0);
9521 narrow
= (current_it_mask
!= 0);
9523 if (!inst
.operands
[1].isreg
)
9525 /* For an immediate, we always generate a 32-bit opcode;
9526 section relaxation will shrink it later if possible. */
9527 if (inst
.instruction
< 0xffff)
9528 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9529 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9530 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9531 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9535 /* See if we can do this with a 16-bit instruction. */
9538 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9539 inst
.instruction
|= inst
.operands
[0].reg
;
9540 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9544 constraint (inst
.operands
[1].shifted
9545 && inst
.operands
[1].immisreg
,
9546 _("shift must be constant"));
9547 if (inst
.instruction
< 0xffff)
9548 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9549 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9550 encode_thumb32_shifted_operand (1);
9556 constraint (inst
.instruction
> 0xffff
9557 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
9558 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
9559 _("unshifted register required"));
9560 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9563 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9564 inst
.instruction
|= inst
.operands
[0].reg
;
9565 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9574 if (do_vfp_nsyn_mrs () == SUCCESS
)
9577 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
9580 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
9581 _("selected processor does not support "
9582 "requested special purpose register"));
9586 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
9587 _("selected processor does not support "
9588 "requested special purpose register %x"));
9589 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9590 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
9591 _("'CPSR' or 'SPSR' expected"));
9594 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9595 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
9596 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
9604 if (do_vfp_nsyn_msr () == SUCCESS
)
9607 constraint (!inst
.operands
[1].isreg
,
9608 _("Thumb encoding does not support an immediate here"));
9609 flags
= inst
.operands
[0].imm
;
9612 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
9613 _("selected processor does not support "
9614 "requested special purpose register"));
9618 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
9619 _("selected processor does not support "
9620 "requested special purpose register"));
9623 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
9624 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
9625 inst
.instruction
|= (flags
& 0xff);
9626 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9632 if (!inst
.operands
[2].present
)
9633 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9635 /* There is no 32-bit MULS and no 16-bit MUL. */
9636 if (unified_syntax
&& inst
.instruction
== T_MNEM_mul
)
9638 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9639 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9640 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9641 inst
.instruction
|= inst
.operands
[2].reg
<< 0;
9645 constraint (!unified_syntax
9646 && inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
9647 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9650 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9651 inst
.instruction
|= inst
.operands
[0].reg
;
9653 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9654 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9655 else if (inst
.operands
[0].reg
== inst
.operands
[2].reg
)
9656 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9658 constraint (1, _("dest must overlap one source register"));
9665 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9666 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9667 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9668 inst
.instruction
|= inst
.operands
[3].reg
;
9670 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9671 as_tsktsk (_("rdhi and rdlo must be different"));
9679 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
9681 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9682 inst
.instruction
|= inst
.operands
[0].imm
;
9686 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9687 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
9692 constraint (inst
.operands
[0].present
,
9693 _("Thumb does not support NOP with hints"));
9694 inst
.instruction
= 0x46c0;
9705 if (THUMB_SETS_FLAGS (inst
.instruction
))
9706 narrow
= (current_it_mask
== 0);
9708 narrow
= (current_it_mask
!= 0);
9709 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9711 if (inst
.size_req
== 4)
9716 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9717 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9718 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9722 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9723 inst
.instruction
|= inst
.operands
[0].reg
;
9724 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9729 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9731 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9733 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9734 inst
.instruction
|= inst
.operands
[0].reg
;
9735 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9742 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9743 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9744 inst
.instruction
|= inst
.operands
[2].reg
;
9745 if (inst
.operands
[3].present
)
9747 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
9748 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9749 _("expression too complex"));
9750 inst
.instruction
|= (val
& 0x1c) << 10;
9751 inst
.instruction
|= (val
& 0x03) << 6;
9758 if (!inst
.operands
[3].present
)
9759 inst
.instruction
&= ~0x00000020;
9766 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9770 do_t_push_pop (void)
9774 constraint (inst
.operands
[0].writeback
,
9775 _("push/pop do not support {reglist}^"));
9776 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9777 _("expression too complex"));
9779 mask
= inst
.operands
[0].imm
;
9780 if ((mask
& ~0xff) == 0)
9781 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9782 else if ((inst
.instruction
== T_MNEM_push
9783 && (mask
& ~0xff) == 1 << REG_LR
)
9784 || (inst
.instruction
== T_MNEM_pop
9785 && (mask
& ~0xff) == 1 << REG_PC
))
9787 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9788 inst
.instruction
|= THUMB_PP_PC_LR
;
9791 else if (unified_syntax
)
9793 if (mask
& (1 << 13))
9794 inst
.error
= _("SP not allowed in register list");
9795 if (inst
.instruction
== T_MNEM_push
)
9797 if (mask
& (1 << 15))
9798 inst
.error
= _("PC not allowed in register list");
9802 if (mask
& (1 << 14)
9803 && mask
& (1 << 15))
9804 inst
.error
= _("LR and PC should not both be in register list");
9806 if ((mask
& (mask
- 1)) == 0)
9808 /* Single register push/pop implemented as str/ldr. */
9809 if (inst
.instruction
== T_MNEM_push
)
9810 inst
.instruction
= 0xf84d0d04; /* str reg, [sp, #-4]! */
9812 inst
.instruction
= 0xf85d0b04; /* ldr reg, [sp], #4 */
9813 mask
= ffs(mask
) - 1;
9817 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9821 inst
.error
= _("invalid register list to push/pop instruction");
9825 inst
.instruction
|= mask
;
9831 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9832 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9838 if (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9839 && inst
.size_req
!= 4)
9841 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9842 inst
.instruction
|= inst
.operands
[0].reg
;
9843 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9845 else if (unified_syntax
)
9847 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9848 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9849 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9850 inst
.instruction
|= inst
.operands
[1].reg
;
9853 inst
.error
= BAD_HIREG
;
9861 Rd
= inst
.operands
[0].reg
;
9862 Rs
= (inst
.operands
[1].present
9863 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9864 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9866 inst
.instruction
|= Rd
<< 8;
9867 inst
.instruction
|= Rs
<< 16;
9868 if (!inst
.operands
[2].isreg
)
9870 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9871 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9874 encode_thumb32_shifted_operand (2);
9880 constraint (current_it_mask
, BAD_NOT_IT
);
9881 if (inst
.operands
[0].imm
)
9882 inst
.instruction
|= 0x8;
9888 if (!inst
.operands
[1].present
)
9889 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9896 switch (inst
.instruction
)
9899 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9901 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9903 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9905 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9909 if (THUMB_SETS_FLAGS (inst
.instruction
))
9910 narrow
= (current_it_mask
== 0);
9912 narrow
= (current_it_mask
!= 0);
9913 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9915 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9917 if (inst
.operands
[2].isreg
9918 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9919 || inst
.operands
[2].reg
> 7))
9921 if (inst
.size_req
== 4)
9926 if (inst
.operands
[2].isreg
)
9928 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9929 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9930 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9931 inst
.instruction
|= inst
.operands
[2].reg
;
9935 inst
.operands
[1].shifted
= 1;
9936 inst
.operands
[1].shift_kind
= shift_kind
;
9937 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9938 ? T_MNEM_movs
: T_MNEM_mov
);
9939 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9940 encode_thumb32_shifted_operand (1);
9941 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9942 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9947 if (inst
.operands
[2].isreg
)
9951 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9952 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9953 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9954 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9958 inst
.instruction
|= inst
.operands
[0].reg
;
9959 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9965 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9966 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9967 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9970 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9971 inst
.instruction
|= inst
.operands
[0].reg
;
9972 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9978 constraint (inst
.operands
[0].reg
> 7
9979 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9980 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9982 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9984 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9985 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9986 _("source1 and dest must be same register"));
9988 switch (inst
.instruction
)
9990 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9991 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9992 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9993 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9997 inst
.instruction
|= inst
.operands
[0].reg
;
9998 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10002 switch (inst
.instruction
)
10004 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10005 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10006 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10007 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
10010 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10011 inst
.instruction
|= inst
.operands
[0].reg
;
10012 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10020 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10021 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10022 inst
.instruction
|= inst
.operands
[2].reg
;
10028 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10029 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10030 _("expression too complex"));
10031 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10032 inst
.instruction
|= (value
& 0xf000) >> 12;
10033 inst
.instruction
|= (value
& 0x0ff0);
10034 inst
.instruction
|= (value
& 0x000f) << 16;
10040 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10041 inst
.instruction
|= inst
.operands
[1].imm
- 1;
10042 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10044 if (inst
.operands
[3].present
)
10046 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10047 _("expression too complex"));
10049 if (inst
.reloc
.exp
.X_add_number
!= 0)
10051 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
10052 inst
.instruction
|= 0x00200000; /* sh bit */
10053 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
10054 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
10056 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10063 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10064 inst
.instruction
|= inst
.operands
[1].imm
- 1;
10065 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10071 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10072 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10073 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10074 || inst
.operands
[2].negative
,
10077 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10078 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10079 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10080 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
10086 if (!inst
.operands
[2].present
)
10087 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
10089 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10090 || inst
.operands
[0].reg
== inst
.operands
[2].reg
10091 || inst
.operands
[0].reg
== inst
.operands
[3].reg
10092 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
10095 inst
.instruction
|= inst
.operands
[0].reg
;
10096 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10097 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10098 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10104 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10105 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10106 inst
.instruction
|= inst
.operands
[2].reg
;
10107 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
10113 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
10114 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
10115 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
10117 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10118 inst
.instruction
|= inst
.operands
[0].reg
;
10119 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10121 else if (unified_syntax
)
10123 if (inst
.instruction
<= 0xffff)
10124 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10125 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10126 inst
.instruction
|= inst
.operands
[1].reg
;
10127 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
10131 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
10132 _("Thumb encoding does not support rotation"));
10133 constraint (1, BAD_HIREG
);
10140 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
10148 half
= (inst
.instruction
& 0x10) != 0;
10149 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
10150 constraint (inst
.operands
[0].immisreg
,
10151 _("instruction requires register index"));
10152 constraint (inst
.operands
[0].imm
== 15,
10153 _("PC is not a valid index register"));
10154 constraint (!half
&& inst
.operands
[0].shifted
,
10155 _("instruction does not allow shifted index"));
10156 inst
.instruction
|= (inst
.operands
[0].reg
<< 16) | inst
.operands
[0].imm
;
10162 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10163 inst
.instruction
|= inst
.operands
[1].imm
;
10164 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10166 if (inst
.operands
[3].present
)
10168 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10169 _("expression too complex"));
10170 if (inst
.reloc
.exp
.X_add_number
!= 0)
10172 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
10173 inst
.instruction
|= 0x00200000; /* sh bit */
10175 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
10176 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
10178 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10185 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10186 inst
.instruction
|= inst
.operands
[1].imm
;
10187 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10190 /* Neon instruction encoder helpers. */
10192 /* Encodings for the different types for various Neon opcodes. */
10194 /* An "invalid" code for the following tables. */
10197 struct neon_tab_entry
10200 unsigned float_or_poly
;
10201 unsigned scalar_or_imm
;
10204 /* Map overloaded Neon opcodes to their respective encodings. */
10205 #define NEON_ENC_TAB \
10206 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10207 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10208 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10209 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10210 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10211 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10212 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10213 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10214 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10215 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10216 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10217 /* Register variants of the following two instructions are encoded as
10218 vcge / vcgt with the operands reversed. */ \
10219 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
10220 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
10221 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10222 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10223 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10224 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10225 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10226 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10227 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10228 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10229 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10230 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10231 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10232 X(vshl, 0x0000400, N_INV, 0x0800510), \
10233 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10234 X(vand, 0x0000110, N_INV, 0x0800030), \
10235 X(vbic, 0x0100110, N_INV, 0x0800030), \
10236 X(veor, 0x1000110, N_INV, N_INV), \
10237 X(vorn, 0x0300110, N_INV, 0x0800010), \
10238 X(vorr, 0x0200110, N_INV, 0x0800010), \
10239 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10240 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10241 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10242 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10243 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10244 X(vst1, 0x0000000, 0x0800000, N_INV), \
10245 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10246 X(vst2, 0x0000100, 0x0800100, N_INV), \
10247 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10248 X(vst3, 0x0000200, 0x0800200, N_INV), \
10249 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10250 X(vst4, 0x0000300, 0x0800300, N_INV), \
10251 X(vmovn, 0x1b20200, N_INV, N_INV), \
10252 X(vtrn, 0x1b20080, N_INV, N_INV), \
10253 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10254 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10255 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10256 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10257 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10258 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10259 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10260 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10261 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10265 #define X(OPC,I,F,S) N_MNEM_##OPC
10270 static const struct neon_tab_entry neon_enc_tab
[] =
10272 #define X(OPC,I,F,S) { (I), (F), (S) }
10277 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10278 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10279 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10280 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10281 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10282 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10283 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10284 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10285 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10286 #define NEON_ENC_SINGLE(X) \
10287 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10288 #define NEON_ENC_DOUBLE(X) \
10289 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10291 /* Define shapes for instruction operands. The following mnemonic characters
10292 are used in this table:
10294 F - VFP S<n> register
10295 D - Neon D<n> register
10296 Q - Neon Q<n> register
10300 L - D<n> register list
10302 This table is used to generate various data:
10303 - enumerations of the form NS_DDR to be used as arguments to
10305 - a table classifying shapes into single, double, quad, mixed.
10306 - a table used to drive neon_select_shape.
10309 #define NEON_SHAPE_DEF \
10310 X(3, (D, D, D), DOUBLE), \
10311 X(3, (Q, Q, Q), QUAD), \
10312 X(3, (D, D, I), DOUBLE), \
10313 X(3, (Q, Q, I), QUAD), \
10314 X(3, (D, D, S), DOUBLE), \
10315 X(3, (Q, Q, S), QUAD), \
10316 X(2, (D, D), DOUBLE), \
10317 X(2, (Q, Q), QUAD), \
10318 X(2, (D, S), DOUBLE), \
10319 X(2, (Q, S), QUAD), \
10320 X(2, (D, R), DOUBLE), \
10321 X(2, (Q, R), QUAD), \
10322 X(2, (D, I), DOUBLE), \
10323 X(2, (Q, I), QUAD), \
10324 X(3, (D, L, D), DOUBLE), \
10325 X(2, (D, Q), MIXED), \
10326 X(2, (Q, D), MIXED), \
10327 X(3, (D, Q, I), MIXED), \
10328 X(3, (Q, D, I), MIXED), \
10329 X(3, (Q, D, D), MIXED), \
10330 X(3, (D, Q, Q), MIXED), \
10331 X(3, (Q, Q, D), MIXED), \
10332 X(3, (Q, D, S), MIXED), \
10333 X(3, (D, Q, S), MIXED), \
10334 X(4, (D, D, D, I), DOUBLE), \
10335 X(4, (Q, Q, Q, I), QUAD), \
10336 X(2, (F, F), SINGLE), \
10337 X(3, (F, F, F), SINGLE), \
10338 X(2, (F, I), SINGLE), \
10339 X(2, (F, D), MIXED), \
10340 X(2, (D, F), MIXED), \
10341 X(3, (F, F, I), MIXED), \
10342 X(4, (R, R, F, F), SINGLE), \
10343 X(4, (F, F, R, R), SINGLE), \
10344 X(3, (D, R, R), DOUBLE), \
10345 X(3, (R, R, D), DOUBLE), \
10346 X(2, (S, R), SINGLE), \
10347 X(2, (R, S), SINGLE), \
10348 X(2, (F, R), SINGLE), \
10349 X(2, (R, F), SINGLE)
10351 #define S2(A,B) NS_##A##B
10352 #define S3(A,B,C) NS_##A##B##C
10353 #define S4(A,B,C,D) NS_##A##B##C##D
10355 #define X(N, L, C) S##N L
10368 enum neon_shape_class
10376 #define X(N, L, C) SC_##C
10378 static enum neon_shape_class neon_shape_class
[] =
10396 /* Register widths of above. */
10397 static unsigned neon_shape_el_size
[] =
10408 struct neon_shape_info
10411 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
10414 #define S2(A,B) { SE_##A, SE_##B }
10415 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10416 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10418 #define X(N, L, C) { N, S##N L }
10420 static struct neon_shape_info neon_shape_tab
[] =
10430 /* Bit masks used in type checking given instructions.
10431 'N_EQK' means the type must be the same as (or based on in some way) the key
10432 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10433 set, various other bits can be set as well in order to modify the meaning of
10434 the type constraint. */
10436 enum neon_type_mask
10458 N_KEY
= 0x100000, /* key element (main type specifier). */
10459 N_EQK
= 0x200000, /* given operand has the same type & size as the key. */
10460 N_VFP
= 0x400000, /* VFP mode: operand size must match register width. */
10461 N_DBL
= 0x000001, /* if N_EQK, this operand is twice the size. */
10462 N_HLF
= 0x000002, /* if N_EQK, this operand is half the size. */
10463 N_SGN
= 0x000004, /* if N_EQK, this operand is forced to be signed. */
10464 N_UNS
= 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10465 N_INT
= 0x000010, /* if N_EQK, this operand is forced to be integer. */
10466 N_FLT
= 0x000020, /* if N_EQK, this operand is forced to be float. */
10467 N_SIZ
= 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10469 N_MAX_NONSPECIAL
= N_F64
10472 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10474 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10475 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10476 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10477 #define N_SUF_32 (N_SU_32 | N_F32)
10478 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10479 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10481 /* Pass this as the first type argument to neon_check_type to ignore types
10483 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10485 /* Select a "shape" for the current instruction (describing register types or
10486 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10487 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10488 function of operand parsing, so this function doesn't need to be called.
10489 Shapes should be listed in order of decreasing length. */
10491 static enum neon_shape
10492 neon_select_shape (enum neon_shape shape
, ...)
10495 enum neon_shape first_shape
= shape
;
10497 /* Fix missing optional operands. FIXME: we don't know at this point how
10498 many arguments we should have, so this makes the assumption that we have
10499 > 1. This is true of all current Neon opcodes, I think, but may not be
10500 true in the future. */
10501 if (!inst
.operands
[1].present
)
10502 inst
.operands
[1] = inst
.operands
[0];
10504 va_start (ap
, shape
);
10506 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
10511 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
10513 if (!inst
.operands
[j
].present
)
10519 switch (neon_shape_tab
[shape
].el
[j
])
10522 if (!(inst
.operands
[j
].isreg
10523 && inst
.operands
[j
].isvec
10524 && inst
.operands
[j
].issingle
10525 && !inst
.operands
[j
].isquad
))
10530 if (!(inst
.operands
[j
].isreg
10531 && inst
.operands
[j
].isvec
10532 && !inst
.operands
[j
].isquad
10533 && !inst
.operands
[j
].issingle
))
10538 if (!(inst
.operands
[j
].isreg
10539 && !inst
.operands
[j
].isvec
))
10544 if (!(inst
.operands
[j
].isreg
10545 && inst
.operands
[j
].isvec
10546 && inst
.operands
[j
].isquad
10547 && !inst
.operands
[j
].issingle
))
10552 if (!(!inst
.operands
[j
].isreg
10553 && !inst
.operands
[j
].isscalar
))
10558 if (!(!inst
.operands
[j
].isreg
10559 && inst
.operands
[j
].isscalar
))
10573 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
10574 first_error (_("invalid instruction shape"));
10579 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10580 means the Q bit should be set). */
10583 neon_quad (enum neon_shape shape
)
10585 return neon_shape_class
[shape
] == SC_QUAD
;
10589 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
10592 /* Allow modification to be made to types which are constrained to be
10593 based on the key element, based on bits set alongside N_EQK. */
10594 if ((typebits
& N_EQK
) != 0)
10596 if ((typebits
& N_HLF
) != 0)
10598 else if ((typebits
& N_DBL
) != 0)
10600 if ((typebits
& N_SGN
) != 0)
10601 *g_type
= NT_signed
;
10602 else if ((typebits
& N_UNS
) != 0)
10603 *g_type
= NT_unsigned
;
10604 else if ((typebits
& N_INT
) != 0)
10605 *g_type
= NT_integer
;
10606 else if ((typebits
& N_FLT
) != 0)
10607 *g_type
= NT_float
;
10608 else if ((typebits
& N_SIZ
) != 0)
10609 *g_type
= NT_untyped
;
10613 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
10614 operand type, i.e. the single type specified in a Neon instruction when it
10615 is the only one given. */
10617 static struct neon_type_el
10618 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
10620 struct neon_type_el dest
= *key
;
10622 assert ((thisarg
& N_EQK
) != 0);
10624 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
10629 /* Convert Neon type and size into compact bitmask representation. */
10631 static enum neon_type_mask
10632 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
10639 case 8: return N_8
;
10640 case 16: return N_16
;
10641 case 32: return N_32
;
10642 case 64: return N_64
;
10650 case 8: return N_I8
;
10651 case 16: return N_I16
;
10652 case 32: return N_I32
;
10653 case 64: return N_I64
;
10661 case 32: return N_F32
;
10662 case 64: return N_F64
;
10670 case 8: return N_P8
;
10671 case 16: return N_P16
;
10679 case 8: return N_S8
;
10680 case 16: return N_S16
;
10681 case 32: return N_S32
;
10682 case 64: return N_S64
;
10690 case 8: return N_U8
;
10691 case 16: return N_U16
;
10692 case 32: return N_U32
;
10693 case 64: return N_U64
;
10704 /* Convert compact Neon bitmask type representation to a type and size. Only
10705 handles the case where a single bit is set in the mask. */
10708 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
10709 enum neon_type_mask mask
)
10711 if ((mask
& N_EQK
) != 0)
10714 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
10716 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
10718 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
10720 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
10725 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
10727 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
10728 *type
= NT_unsigned
;
10729 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
10730 *type
= NT_integer
;
10731 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
10732 *type
= NT_untyped
;
10733 else if ((mask
& (N_P8
| N_P16
)) != 0)
10735 else if ((mask
& (N_F32
| N_F64
)) != 0)
10743 /* Modify a bitmask of allowed types. This is only needed for type
10747 modify_types_allowed (unsigned allowed
, unsigned mods
)
10750 enum neon_el_type type
;
10756 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
10758 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
10760 neon_modify_type_size (mods
, &type
, &size
);
10761 destmask
|= type_chk_of_el_type (type
, size
);
10768 /* Check type and return type classification.
10769 The manual states (paraphrase): If one datatype is given, it indicates the
10771 - the second operand, if there is one
10772 - the operand, if there is no second operand
10773 - the result, if there are no operands.
10774 This isn't quite good enough though, so we use a concept of a "key" datatype
10775 which is set on a per-instruction basis, which is the one which matters when
10776 only one data type is written.
10777 Note: this function has side-effects (e.g. filling in missing operands). All
10778 Neon instructions should call it before performing bit encoding. */
10780 static struct neon_type_el
10781 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
10784 unsigned i
, pass
, key_el
= 0;
10785 unsigned types
[NEON_MAX_TYPE_ELS
];
10786 enum neon_el_type k_type
= NT_invtype
;
10787 unsigned k_size
= -1u;
10788 struct neon_type_el badtype
= {NT_invtype
, -1};
10789 unsigned key_allowed
= 0;
10791 /* Optional registers in Neon instructions are always (not) in operand 1.
10792 Fill in the missing operand here, if it was omitted. */
10793 if (els
> 1 && !inst
.operands
[1].present
)
10794 inst
.operands
[1] = inst
.operands
[0];
10796 /* Suck up all the varargs. */
10798 for (i
= 0; i
< els
; i
++)
10800 unsigned thisarg
= va_arg (ap
, unsigned);
10801 if (thisarg
== N_IGNORE_TYPE
)
10806 types
[i
] = thisarg
;
10807 if ((thisarg
& N_KEY
) != 0)
10812 if (inst
.vectype
.elems
> 0)
10813 for (i
= 0; i
< els
; i
++)
10814 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
10816 first_error (_("types specified in both the mnemonic and operands"));
10820 /* Duplicate inst.vectype elements here as necessary.
10821 FIXME: No idea if this is exactly the same as the ARM assembler,
10822 particularly when an insn takes one register and one non-register
10824 if (inst
.vectype
.elems
== 1 && els
> 1)
10827 inst
.vectype
.elems
= els
;
10828 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
10829 for (j
= 0; j
< els
; j
++)
10831 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10834 else if (inst
.vectype
.elems
== 0 && els
> 0)
10837 /* No types were given after the mnemonic, so look for types specified
10838 after each operand. We allow some flexibility here; as long as the
10839 "key" operand has a type, we can infer the others. */
10840 for (j
= 0; j
< els
; j
++)
10841 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
10842 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
10844 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
10846 for (j
= 0; j
< els
; j
++)
10847 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
10848 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10853 first_error (_("operand types can't be inferred"));
10857 else if (inst
.vectype
.elems
!= els
)
10859 first_error (_("type specifier has the wrong number of parts"));
10863 for (pass
= 0; pass
< 2; pass
++)
10865 for (i
= 0; i
< els
; i
++)
10867 unsigned thisarg
= types
[i
];
10868 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
10869 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
10870 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
10871 unsigned g_size
= inst
.vectype
.el
[i
].size
;
10873 /* Decay more-specific signed & unsigned types to sign-insensitive
10874 integer types if sign-specific variants are unavailable. */
10875 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
10876 && (types_allowed
& N_SU_ALL
) == 0)
10877 g_type
= NT_integer
;
10879 /* If only untyped args are allowed, decay any more specific types to
10880 them. Some instructions only care about signs for some element
10881 sizes, so handle that properly. */
10882 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
10883 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
10884 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
10885 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
10886 g_type
= NT_untyped
;
10890 if ((thisarg
& N_KEY
) != 0)
10894 key_allowed
= thisarg
& ~N_KEY
;
10899 if ((thisarg
& N_VFP
) != 0)
10901 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
10902 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
10904 /* In VFP mode, operands must match register widths. If we
10905 have a key operand, use its width, else use the width of
10906 the current operand. */
10912 if (regwidth
!= match
)
10914 first_error (_("operand size must match register width"));
10919 if ((thisarg
& N_EQK
) == 0)
10921 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10923 if ((given_type
& types_allowed
) == 0)
10925 first_error (_("bad type in Neon instruction"));
10931 enum neon_el_type mod_k_type
= k_type
;
10932 unsigned mod_k_size
= k_size
;
10933 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10934 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10936 first_error (_("inconsistent types in Neon instruction"));
10944 return inst
.vectype
.el
[key_el
];
10947 /* Neon-style VFP instruction forwarding. */
10949 /* Thumb VFP instructions have 0xE in the condition field. */
10952 do_vfp_cond_or_thumb (void)
10955 inst
.instruction
|= 0xe0000000;
10957 inst
.instruction
|= inst
.cond
<< 28;
10960 /* Look up and encode a simple mnemonic, for use as a helper function for the
10961 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10962 etc. It is assumed that operand parsing has already been done, and that the
10963 operands are in the form expected by the given opcode (this isn't necessarily
10964 the same as the form in which they were parsed, hence some massaging must
10965 take place before this function is called).
10966 Checks current arch version against that in the looked-up opcode. */
10969 do_vfp_nsyn_opcode (const char *opname
)
10971 const struct asm_opcode
*opcode
;
10973 opcode
= hash_find (arm_ops_hsh
, opname
);
10978 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
10979 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
10984 inst
.instruction
= opcode
->tvalue
;
10985 opcode
->tencode ();
10989 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
10990 opcode
->aencode ();
10995 do_vfp_nsyn_add_sub (enum neon_shape rs
)
10997 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
11002 do_vfp_nsyn_opcode ("fadds");
11004 do_vfp_nsyn_opcode ("fsubs");
11009 do_vfp_nsyn_opcode ("faddd");
11011 do_vfp_nsyn_opcode ("fsubd");
11015 /* Check operand types to see if this is a VFP instruction, and if so call
11019 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
11021 enum neon_shape rs
;
11022 struct neon_type_el et
;
11027 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11028 et
= neon_check_type (2, rs
,
11029 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11033 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11034 et
= neon_check_type (3, rs
,
11035 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11042 if (et
.type
!= NT_invtype
)
11054 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
11056 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
11061 do_vfp_nsyn_opcode ("fmacs");
11063 do_vfp_nsyn_opcode ("fmscs");
11068 do_vfp_nsyn_opcode ("fmacd");
11070 do_vfp_nsyn_opcode ("fmscd");
11075 do_vfp_nsyn_mul (enum neon_shape rs
)
11078 do_vfp_nsyn_opcode ("fmuls");
11080 do_vfp_nsyn_opcode ("fmuld");
11084 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
11086 int is_neg
= (inst
.instruction
& 0x80) != 0;
11087 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
11092 do_vfp_nsyn_opcode ("fnegs");
11094 do_vfp_nsyn_opcode ("fabss");
11099 do_vfp_nsyn_opcode ("fnegd");
11101 do_vfp_nsyn_opcode ("fabsd");
11105 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11106 insns belong to Neon, and are handled elsewhere. */
11109 do_vfp_nsyn_ldm_stm (int is_dbmode
)
11111 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
11115 do_vfp_nsyn_opcode ("fldmdbs");
11117 do_vfp_nsyn_opcode ("fldmias");
11122 do_vfp_nsyn_opcode ("fstmdbs");
11124 do_vfp_nsyn_opcode ("fstmias");
11129 do_vfp_nsyn_sqrt (void)
11131 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11132 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11135 do_vfp_nsyn_opcode ("fsqrts");
11137 do_vfp_nsyn_opcode ("fsqrtd");
11141 do_vfp_nsyn_div (void)
11143 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11144 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11145 N_F32
| N_F64
| N_KEY
| N_VFP
);
11148 do_vfp_nsyn_opcode ("fdivs");
11150 do_vfp_nsyn_opcode ("fdivd");
11154 do_vfp_nsyn_nmul (void)
11156 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11157 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11158 N_F32
| N_F64
| N_KEY
| N_VFP
);
11162 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11163 do_vfp_sp_dyadic ();
11167 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11168 do_vfp_dp_rd_rn_rm ();
11170 do_vfp_cond_or_thumb ();
11174 do_vfp_nsyn_cmp (void)
11176 if (inst
.operands
[1].isreg
)
11178 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11179 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11183 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11184 do_vfp_sp_monadic ();
11188 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11189 do_vfp_dp_rd_rm ();
11194 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
11195 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
11197 switch (inst
.instruction
& 0x0fffffff)
11200 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
11203 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
11211 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11212 do_vfp_sp_compare_z ();
11216 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11220 do_vfp_cond_or_thumb ();
11224 nsyn_insert_sp (void)
11226 inst
.operands
[1] = inst
.operands
[0];
11227 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
11228 inst
.operands
[0].reg
= 13;
11229 inst
.operands
[0].isreg
= 1;
11230 inst
.operands
[0].writeback
= 1;
11231 inst
.operands
[0].present
= 1;
11235 do_vfp_nsyn_push (void)
11238 if (inst
.operands
[1].issingle
)
11239 do_vfp_nsyn_opcode ("fstmdbs");
11241 do_vfp_nsyn_opcode ("fstmdbd");
11245 do_vfp_nsyn_pop (void)
11248 if (inst
.operands
[1].issingle
)
11249 do_vfp_nsyn_opcode ("fldmdbs");
11251 do_vfp_nsyn_opcode ("fldmdbd");
11254 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11255 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11258 neon_dp_fixup (unsigned i
)
11262 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned size)
{
  unsigned first_set = (unsigned) ffs (size);
  return first_set - 4;
}
/* A Neon register number occupies five bits split across the encoding:
   LOW4 extracts the low four bits, HI1 the top (fifth) bit.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
11288 /* Encode insns with bit pattern:
11290 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11291 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11293 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11294 different meaning for some instruction. */
11297 neon_three_same (int isquad
, int ubit
, int size
)
11299 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11300 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11301 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11302 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11303 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11304 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11305 inst
.instruction
|= (isquad
!= 0) << 6;
11306 inst
.instruction
|= (ubit
!= 0) << 24;
11308 inst
.instruction
|= neon_logbits (size
) << 20;
11310 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11313 /* Encode instructions of the form:
11315 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11316 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11318 Don't write size if SIZE == -1. */
11321 neon_two_same (int qbit
, int ubit
, int size
)
11323 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11324 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11325 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11326 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11327 inst
.instruction
|= (qbit
!= 0) << 6;
11328 inst
.instruction
|= (ubit
!= 0) << 24;
11331 inst
.instruction
|= neon_logbits (size
) << 18;
11333 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11336 /* Neon instruction encoders, in approximate order of appearance. */
11339 do_neon_dyadic_i_su (void)
11341 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11342 struct neon_type_el et
= neon_check_type (3, rs
,
11343 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
11344 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11348 do_neon_dyadic_i64_su (void)
11350 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11351 struct neon_type_el et
= neon_check_type (3, rs
,
11352 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
11353 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11357 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
11360 unsigned size
= et
.size
>> 3;
11361 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11362 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11363 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11364 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11365 inst
.instruction
|= (isquad
!= 0) << 6;
11366 inst
.instruction
|= immbits
<< 16;
11367 inst
.instruction
|= (size
>> 3) << 7;
11368 inst
.instruction
|= (size
& 0x7) << 19;
11370 inst
.instruction
|= (uval
!= 0) << 24;
11372 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11376 do_neon_shl_imm (void)
11378 if (!inst
.operands
[2].isreg
)
11380 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11381 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
11382 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11383 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
11387 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11388 struct neon_type_el et
= neon_check_type (3, rs
,
11389 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11390 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11391 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11396 do_neon_qshl_imm (void)
11398 if (!inst
.operands
[2].isreg
)
11400 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11401 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
11402 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11403 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
11404 inst
.operands
[2].imm
);
11408 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11409 struct neon_type_el et
= neon_check_type (3, rs
,
11410 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11411 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11412 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11417 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
11419 /* Handle .I8 pseudo-instructions. */
11422 /* Unfortunately, this will make everything apart from zero out-of-range.
11423 FIXME is this the intended semantics? There doesn't seem much point in
11424 accepting .I8 if so. */
11425 immediate
|= immediate
<< 8;
11431 if (immediate
== (immediate
& 0x000000ff))
11433 *immbits
= immediate
;
11436 else if (immediate
== (immediate
& 0x0000ff00))
11438 *immbits
= immediate
>> 8;
11441 else if (immediate
== (immediate
& 0x00ff0000))
11443 *immbits
= immediate
>> 16;
11446 else if (immediate
== (immediate
& 0xff000000))
11448 *immbits
= immediate
>> 24;
11451 if ((immediate
& 0xffff) != (immediate
>> 16))
11452 goto bad_immediate
;
11453 immediate
&= 0xffff;
11456 if (immediate
== (immediate
& 0x000000ff))
11458 *immbits
= immediate
;
11461 else if (immediate
== (immediate
& 0x0000ff00))
11463 *immbits
= immediate
>> 8;
11468 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. each byte is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
/* For immediate of above form, return 0bABCD: one bit per byte, taken from
   the low bit of each byte.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh: the sign bit
   (bit 31) becomes bit 7, bits 25:19 of the float become bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned mantissa_and_exp = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return mantissa_and_exp | sign;
}
11501 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11502 the instruction. *OP is passed as the initial value of the op field, and
11503 may be set to a different value depending on the constant (i.e.
11504 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11505 MVN). If the immediate looks like a repeated parttern then also
11506 try smaller element sizes. */
11509 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, unsigned *immbits
,
11510 int *op
, int size
, enum neon_el_type type
)
11512 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
11514 if (size
!= 32 || *op
== 1)
11516 *immbits
= neon_qfloat_bits (immlo
);
11522 if (neon_bits_same_in_bytes (immhi
)
11523 && neon_bits_same_in_bytes (immlo
))
11527 *immbits
= (neon_squash_bits (immhi
) << 4)
11528 | neon_squash_bits (immlo
);
11533 if (immhi
!= immlo
)
11539 if (immlo
== (immlo
& 0x000000ff))
11544 else if (immlo
== (immlo
& 0x0000ff00))
11546 *immbits
= immlo
>> 8;
11549 else if (immlo
== (immlo
& 0x00ff0000))
11551 *immbits
= immlo
>> 16;
11554 else if (immlo
== (immlo
& 0xff000000))
11556 *immbits
= immlo
>> 24;
11559 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
11561 *immbits
= (immlo
>> 8) & 0xff;
11564 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
11566 *immbits
= (immlo
>> 16) & 0xff;
11570 if ((immlo
& 0xffff) != (immlo
>> 16))
11577 if (immlo
== (immlo
& 0x000000ff))
11582 else if (immlo
== (immlo
& 0x0000ff00))
11584 *immbits
= immlo
>> 8;
11588 if ((immlo
& 0xff) != (immlo
>> 8))
11593 if (immlo
== (immlo
& 0x000000ff))
11595 /* Don't allow MVN with 8-bit immediate. */
11605 /* Write immediate bits [7:0] to the following locations:
11607 |28/24|23 19|18 16|15 4|3 0|
11608 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11610 This function is used by VMOV/VMVN/VORR/VBIC. */
11613 neon_write_immbits (unsigned immbits
)
11615 inst
.instruction
|= immbits
& 0xf;
11616 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
11617 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL, in
   which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
11658 do_neon_logic (void)
11660 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
11662 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11663 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11664 /* U bit and size field were set as part of the bitmask. */
11665 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11666 neon_three_same (neon_quad (rs
), 0, -1);
11670 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11671 struct neon_type_el et
= neon_check_type (2, rs
,
11672 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11673 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
11677 if (et
.type
== NT_invtype
)
11680 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11682 immbits
= inst
.operands
[1].imm
;
11685 /* .i64 is a pseudo-op, so the immediate must be a repeating
11687 if (immbits
!= (inst
.operands
[1].regisimm
?
11688 inst
.operands
[1].reg
: 0))
11690 /* Set immbits to an invalid constant. */
11691 immbits
= 0xdeadbeef;
11698 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11702 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11706 /* Pseudo-instruction for VBIC. */
11707 neon_invert_size (&immbits
, 0, et
.size
);
11708 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11712 /* Pseudo-instruction for VORR. */
11713 neon_invert_size (&immbits
, 0, et
.size
);
11714 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11724 inst
.instruction
|= neon_quad (rs
) << 6;
11725 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11726 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11727 inst
.instruction
|= cmode
<< 8;
11728 neon_write_immbits (immbits
);
11730 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11735 do_neon_bitfield (void)
11737 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11738 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11739 neon_three_same (neon_quad (rs
), 0, -1);
11743 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
11746 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11747 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
11749 if (et
.type
== NT_float
)
11751 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
11752 neon_three_same (neon_quad (rs
), 0, -1);
11756 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11757 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
11762 do_neon_dyadic_if_su (void)
11764 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11768 do_neon_dyadic_if_su_d (void)
11770 /* This version only allow D registers, but that constraint is enforced during
11771 operand parsing so we don't need to do anything extra here. */
11772 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11776 do_neon_dyadic_if_i_d (void)
11778 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11779 affected if we specify unsigned args. */
11780 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon: which checks/fixups to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2
};
11789 /* Call this function if an instruction which may have belonged to the VFP or
11790 Neon instruction sets, but turned out to be a Neon instruction (due to the
11791 operand types involved, etc.). We have to check and/or fix-up a couple of
11794 - Make sure the user hasn't attempted to make a Neon instruction
11796 - Alter the value in the condition code field if necessary.
11797 - Make sure that the arch supports Neon instructions.
11799 Which of these operations take place depends on bits from enum
11800 vfp_or_neon_is_neon_bits.
11802 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11803 current instruction's condition is COND_ALWAYS, the condition field is
11804 changed to inst.uncond_value. This is necessary because instructions shared
11805 between VFP and Neon may be conditional for the VFP variants only, and the
11806 unconditional Neon version must have, e.g., 0xF in the condition field. */
11809 vfp_or_neon_is_neon (unsigned check
)
11811 /* Conditions are always legal in Thumb mode (IT blocks). */
11812 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
11814 if (inst
.cond
!= COND_ALWAYS
)
11816 first_error (_(BAD_COND
));
11819 if (inst
.uncond_value
!= -1)
11820 inst
.instruction
|= inst
.uncond_value
<< 28;
11823 if ((check
& NEON_CHECK_ARCH
)
11824 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
11826 first_error (_(BAD_FPU
));
11834 do_neon_addsub_if_i (void)
11836 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
11839 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11842 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11843 affected if we specify unsigned args. */
11844 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
11847 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11849 V<op> A,B (A is operand 0, B is operand 2)
11854 so handle that case specially. */
11857 neon_exchange_operands (void)
11859 void *scratch
= alloca (sizeof (inst
.operands
[0]));
11860 if (inst
.operands
[1].present
)
11862 /* Swap operands[1] and operands[2]. */
11863 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
11864 inst
.operands
[1] = inst
.operands
[2];
11865 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
11869 inst
.operands
[1] = inst
.operands
[2];
11870 inst
.operands
[2] = inst
.operands
[0];
11875 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
11877 if (inst
.operands
[2].isreg
)
11880 neon_exchange_operands ();
11881 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
11885 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11886 struct neon_type_el et
= neon_check_type (2, rs
,
11887 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
11889 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11890 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11891 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11892 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11893 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11894 inst
.instruction
|= neon_quad (rs
) << 6;
11895 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11896 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11898 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11905 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
11909 do_neon_cmp_inv (void)
11911 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
11917 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
11952 /* Encode multiply / multiply-accumulate scalar instructions. */
11955 neon_mul_mac (struct neon_type_el et
, int ubit
)
11959 /* Give a more helpful error message if we have an invalid type. */
11960 if (et
.type
== NT_invtype
)
11963 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
11964 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11965 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11966 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11967 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11968 inst
.instruction
|= LOW4 (scalar
);
11969 inst
.instruction
|= HI1 (scalar
) << 5;
11970 inst
.instruction
|= (et
.type
== NT_float
) << 8;
11971 inst
.instruction
|= neon_logbits (et
.size
) << 20;
11972 inst
.instruction
|= (ubit
!= 0) << 24;
11974 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11978 do_neon_mac_maybe_scalar (void)
11980 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
11983 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11986 if (inst
.operands
[2].isscalar
)
11988 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
11989 struct neon_type_el et
= neon_check_type (3, rs
,
11990 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
11991 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11992 neon_mul_mac (et
, neon_quad (rs
));
11996 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11997 affected if we specify unsigned args. */
11998 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12005 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12006 struct neon_type_el et
= neon_check_type (3, rs
,
12007 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12008 neon_three_same (neon_quad (rs
), 0, et
.size
);
12011 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12012 same types as the MAC equivalents. The polynomial type for this instruction
12013 is encoded the same as the integer type. */
12018 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
12021 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12024 if (inst
.operands
[2].isscalar
)
12025 do_neon_mac_maybe_scalar ();
12027 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
12031 do_neon_qdmulh (void)
12033 if (inst
.operands
[2].isscalar
)
12035 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
12036 struct neon_type_el et
= neon_check_type (3, rs
,
12037 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12038 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12039 neon_mul_mac (et
, neon_quad (rs
));
12043 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12044 struct neon_type_el et
= neon_check_type (3, rs
,
12045 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12046 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12047 /* The U bit (rounding) comes from bit mask. */
12048 neon_three_same (neon_quad (rs
), 0, et
.size
);
12053 do_neon_fcmp_absolute (void)
12055 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12056 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
12057 /* Size field comes from bit mask. */
12058 neon_three_same (neon_quad (rs
), 1, -1);
/* VACLE/VACLT pseudo-forms: swap operands, then encode as VACGE/VACGT.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
12069 do_neon_step (void)
12071 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12072 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
12073 neon_three_same (neon_quad (rs
), 0, -1);
12077 do_neon_abs_neg (void)
12079 enum neon_shape rs
;
12080 struct neon_type_el et
;
12082 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
12085 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12088 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12089 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
12091 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12092 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12093 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12094 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12095 inst
.instruction
|= neon_quad (rs
) << 6;
12096 inst
.instruction
|= (et
.type
== NT_float
) << 10;
12097 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12099 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12105 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12106 struct neon_type_el et
= neon_check_type (2, rs
,
12107 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12108 int imm
= inst
.operands
[2].imm
;
12109 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12110 _("immediate out of range for insert"));
12111 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
12117 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12118 struct neon_type_el et
= neon_check_type (2, rs
,
12119 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12120 int imm
= inst
.operands
[2].imm
;
12121 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12122 _("immediate out of range for insert"));
12123 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
12127 do_neon_qshlu_imm (void)
12129 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12130 struct neon_type_el et
= neon_check_type (2, rs
,
12131 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
12132 int imm
= inst
.operands
[2].imm
;
12133 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12134 _("immediate out of range for shift"));
12135 /* Only encodes the 'U present' variant of the instruction.
12136 In this case, signed types have OP (bit 8) set to 0.
12137 Unsigned types have OP set to 1. */
12138 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
12139 /* The rest of the bits are the same as other immediate shifts. */
12140 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
12144 do_neon_qmovn (void)
12146 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
12147 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
12148 /* Saturating move where operands can be signed or unsigned, and the
12149 destination has the same signedness. */
12150 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12151 if (et
.type
== NT_unsigned
)
12152 inst
.instruction
|= 0xc0;
12154 inst
.instruction
|= 0x80;
12155 neon_two_same (0, 1, et
.size
/ 2);
12159 do_neon_qmovun (void)
12161 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
12162 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
12163 /* Saturating move with unsigned results. Operands must be signed. */
12164 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12165 neon_two_same (0, 1, et
.size
/ 2);
12169 do_neon_rshift_sat_narrow (void)
12171 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12172 or unsigned. If operands are unsigned, results must also be unsigned. */
12173 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12174 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
12175 int imm
= inst
.operands
[2].imm
;
12176 /* This gets the bounds check, size encoding and immediate bits calculation
12180 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12181 VQMOVN.I<size> <Dd>, <Qm>. */
12184 inst
.operands
[2].present
= 0;
12185 inst
.instruction
= N_MNEM_vqmovn
;
12190 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12191 _("immediate out of range"));
12192 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
12196 do_neon_rshift_sat_narrow_u (void)
12198 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12199 or unsigned. If operands are unsigned, results must also be unsigned. */
12200 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12201 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
12202 int imm
= inst
.operands
[2].imm
;
12203 /* This gets the bounds check, size encoding and immediate bits calculation
12207 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12208 VQMOVUN.I<size> <Dd>, <Qm>. */
12211 inst
.operands
[2].present
= 0;
12212 inst
.instruction
= N_MNEM_vqmovun
;
12217 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12218 _("immediate out of range"));
12219 /* FIXME: The manual is kind of unclear about what value U should have in
12220 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12222 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
12226 do_neon_movn (void)
12228 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
12229 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12230 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12231 neon_two_same (0, 1, et
.size
/ 2);
12235 do_neon_rshift_narrow (void)
12237 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12238 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12239 int imm
= inst
.operands
[2].imm
;
12240 /* This gets the bounds check, size encoding and immediate bits calculation
12244 /* If immediate is zero then we are a pseudo-instruction for
12245 VMOVN.I<size> <Dd>, <Qm> */
12248 inst
.operands
[2].present
= 0;
12249 inst
.instruction
= N_MNEM_vmovn
;
12254 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12255 _("immediate out of range for narrowing operation"));
12256 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
12260 do_neon_shll (void)
12262 /* FIXME: Type checking when lengthening. */
12263 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
12264 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
12265 unsigned imm
= inst
.operands
[2].imm
;
12267 if (imm
== et
.size
)
12269 /* Maximum shift variant. */
12270 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12271 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12272 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12273 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12274 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12275 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12277 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12281 /* A more-specific type check for non-max versions. */
12282 et
= neon_check_type (2, NS_QDI
,
12283 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12284 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12285 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
12289 /* Check the various types for the VCVT instruction, and return which version
12290 the current instruction is. */
12293 neon_cvt_flavour (enum neon_shape rs
)
12295 #define CVT_VAR(C,X,Y) \
12296 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12297 if (et.type != NT_invtype) \
12299 inst.error = NULL; \
12302 struct neon_type_el et
;
12303 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
12304 || rs
== NS_FF
) ? N_VFP
: 0;
12305 /* The instruction versions which take an immediate take one register
12306 argument, which is extended to the width of the full register. Thus the
12307 "source" and "destination" registers must have the same width. Hack that
12308 here by making the size equal to the key (wider, in this case) operand. */
12309 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
12311 CVT_VAR (0, N_S32
, N_F32
);
12312 CVT_VAR (1, N_U32
, N_F32
);
12313 CVT_VAR (2, N_F32
, N_S32
);
12314 CVT_VAR (3, N_F32
, N_U32
);
12318 /* VFP instructions. */
12319 CVT_VAR (4, N_F32
, N_F64
);
12320 CVT_VAR (5, N_F64
, N_F32
);
12321 CVT_VAR (6, N_S32
, N_F64
| key
);
12322 CVT_VAR (7, N_U32
, N_F64
| key
);
12323 CVT_VAR (8, N_F64
| key
, N_S32
);
12324 CVT_VAR (9, N_F64
| key
, N_U32
);
12325 /* VFP instructions with bitshift. */
12326 CVT_VAR (10, N_F32
| key
, N_S16
);
12327 CVT_VAR (11, N_F32
| key
, N_U16
);
12328 CVT_VAR (12, N_F64
| key
, N_S16
);
12329 CVT_VAR (13, N_F64
| key
, N_U16
);
12330 CVT_VAR (14, N_S16
, N_F32
| key
);
12331 CVT_VAR (15, N_U16
, N_F32
| key
);
12332 CVT_VAR (16, N_S16
, N_F64
| key
);
12333 CVT_VAR (17, N_U16
, N_F64
| key
);
12339 /* Neon-syntax VFP conversions. */
12342 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
12344 const char *opname
= 0;
12346 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
12348 /* Conversions with immediate bitshift. */
12349 const char *enc
[] =
12371 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12373 opname
= enc
[flavour
];
12374 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12375 _("operands 0 and 1 must be the same register"));
12376 inst
.operands
[1] = inst
.operands
[2];
12377 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
12382 /* Conversions without bitshift. */
12383 const char *enc
[] =
12397 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12398 opname
= enc
[flavour
];
12402 do_vfp_nsyn_opcode (opname
);
12406 do_vfp_nsyn_cvtz (void)
12408 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
12409 int flavour
= neon_cvt_flavour (rs
);
12410 const char *enc
[] =
12422 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
12423 do_vfp_nsyn_opcode (enc
[flavour
]);
12429 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
12430 NS_FD
, NS_DF
, NS_FF
, NS_NULL
);
12431 int flavour
= neon_cvt_flavour (rs
);
12433 /* VFP rather than Neon conversions. */
12436 do_vfp_nsyn_cvt (rs
, flavour
);
12445 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12448 /* Fixed-point conversion with #0 immediate is encoded as an
12449 integer conversion. */
12450 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
12452 unsigned immbits
= 32 - inst
.operands
[2].imm
;
12453 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12454 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12456 inst
.instruction
|= enctab
[flavour
];
12457 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12458 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12459 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12460 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12461 inst
.instruction
|= neon_quad (rs
) << 6;
12462 inst
.instruction
|= 1 << 21;
12463 inst
.instruction
|= immbits
<< 16;
12465 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12473 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
12475 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12477 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12481 inst
.instruction
|= enctab
[flavour
];
12483 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12484 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12485 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12486 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12487 inst
.instruction
|= neon_quad (rs
) << 6;
12488 inst
.instruction
|= 2 << 18;
12490 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12495 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12496 do_vfp_nsyn_cvt (rs
, flavour
);
12501 neon_move_immediate (void)
12503 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
12504 struct neon_type_el et
= neon_check_type (2, rs
,
12505 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
12506 unsigned immlo
, immhi
= 0, immbits
;
12509 constraint (et
.type
== NT_invtype
,
12510 _("operand size must be specified for immediate VMOV"));
12512 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12513 op
= (inst
.instruction
& (1 << 5)) != 0;
12515 immlo
= inst
.operands
[1].imm
;
12516 if (inst
.operands
[1].regisimm
)
12517 immhi
= inst
.operands
[1].reg
;
12519 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
12520 _("immediate has bits set outside the operand size"));
12522 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
12523 et
.size
, et
.type
)) == FAIL
)
12525 /* Invert relevant bits only. */
12526 neon_invert_size (&immlo
, &immhi
, et
.size
);
12527 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12528 with one or the other; those cases are caught by
12529 neon_cmode_for_move_imm. */
12531 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
12532 et
.size
, et
.type
)) == FAIL
)
12534 first_error (_("immediate out of range"));
12539 inst
.instruction
&= ~(1 << 5);
12540 inst
.instruction
|= op
<< 5;
12542 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12543 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12544 inst
.instruction
|= neon_quad (rs
) << 6;
12545 inst
.instruction
|= cmode
<< 8;
12547 neon_write_immbits (immbits
);
12553 if (inst
.operands
[1].isreg
)
12555 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12557 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12558 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12559 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12560 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12561 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12562 inst
.instruction
|= neon_quad (rs
) << 6;
12566 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12567 neon_move_immediate ();
12570 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12573 /* Encode instructions of form:
12575 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12576 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12581 neon_mixed_length (struct neon_type_el et
, unsigned size
)
12583 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12584 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12585 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12586 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12587 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12588 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12589 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
12590 inst
.instruction
|= neon_logbits (size
) << 20;
12592 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12596 do_neon_dyadic_long (void)
12598 /* FIXME: Type checking for lengthening op. */
12599 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12600 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12601 neon_mixed_length (et
, et
.size
);
12605 do_neon_abal (void)
12607 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12608 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12609 neon_mixed_length (et
, et
.size
);
12613 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
12615 if (inst
.operands
[2].isscalar
)
12617 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
12618 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
12619 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12620 neon_mul_mac (et
, et
.type
== NT_unsigned
);
12624 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12625 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
12626 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12627 neon_mixed_length (et
, et
.size
);
12632 do_neon_mac_maybe_scalar_long (void)
12634 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
12638 do_neon_dyadic_wide (void)
12640 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
12641 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12642 neon_mixed_length (et
, et
.size
);
12646 do_neon_dyadic_narrow (void)
12648 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12649 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
12650 /* Operand sign is unimportant, and the U bit is part of the opcode,
12651 so force the operand type to integer. */
12652 et
.type
= NT_integer
;
12653 neon_mixed_length (et
, et
.size
/ 2);
12657 do_neon_mul_sat_scalar_long (void)
12659 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
12663 do_neon_vmull (void)
12665 if (inst
.operands
[2].isscalar
)
12666 do_neon_mac_maybe_scalar_long ();
12669 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12670 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
12671 if (et
.type
== NT_poly
)
12672 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
12674 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12675 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12676 zero. Should be OK as-is. */
12677 neon_mixed_length (et
, et
.size
);
12684 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
12685 struct neon_type_el et
= neon_check_type (3, rs
,
12686 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12687 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
12688 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12689 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12690 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12691 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12692 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12693 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12694 inst
.instruction
|= neon_quad (rs
) << 6;
12695 inst
.instruction
|= imm
<< 8;
12697 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12703 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12704 struct neon_type_el et
= neon_check_type (2, rs
,
12705 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12706 unsigned op
= (inst
.instruction
>> 7) & 3;
12707 /* N (width of reversed regions) is encoded as part of the bitmask. We
12708 extract it here to check the elements to be reversed are smaller.
12709 Otherwise we'd get a reserved instruction. */
12710 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
12711 assert (elsize
!= 0);
12712 constraint (et
.size
>= elsize
,
12713 _("elements must be smaller than reversal region"));
12714 neon_two_same (neon_quad (rs
), 1, et
.size
);
12720 if (inst
.operands
[1].isscalar
)
12722 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
12723 struct neon_type_el et
= neon_check_type (2, rs
,
12724 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12725 unsigned sizebits
= et
.size
>> 3;
12726 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12727 int logsize
= neon_logbits (et
.size
);
12728 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
12730 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
12733 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12734 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12735 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12736 inst
.instruction
|= LOW4 (dm
);
12737 inst
.instruction
|= HI1 (dm
) << 5;
12738 inst
.instruction
|= neon_quad (rs
) << 6;
12739 inst
.instruction
|= x
<< 17;
12740 inst
.instruction
|= sizebits
<< 16;
12742 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12746 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
12747 struct neon_type_el et
= neon_check_type (2, rs
,
12748 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12749 /* Duplicate ARM register to lanes of vector. */
12750 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
12753 case 8: inst
.instruction
|= 0x400000; break;
12754 case 16: inst
.instruction
|= 0x000020; break;
12755 case 32: inst
.instruction
|= 0x000000; break;
12758 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12759 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
12760 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
12761 inst
.instruction
|= neon_quad (rs
) << 21;
12762 /* The encoding for this instruction is identical for the ARM and Thumb
12763 variants, except for the condition field. */
12764 do_vfp_cond_or_thumb ();
12768 /* VMOV has particularly many variations. It can be one of:
12769 0. VMOV<c><q> <Qd>, <Qm>
12770 1. VMOV<c><q> <Dd>, <Dm>
12771 (Register operations, which are VORR with Rm = Rn.)
12772 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12773 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12775 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12776 (ARM register to scalar.)
12777 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12778 (Two ARM registers to vector.)
12779 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12780 (Scalar to ARM register.)
12781 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12782 (Vector to two ARM registers.)
12783 8. VMOV.F32 <Sd>, <Sm>
12784 9. VMOV.F64 <Dd>, <Dm>
12785 (VFP register moves.)
12786 10. VMOV.F32 <Sd>, #imm
12787 11. VMOV.F64 <Dd>, #imm
12788 (VFP float immediate load.)
12789 12. VMOV <Rd>, <Sm>
12790 (VFP single to ARM reg.)
12791 13. VMOV <Sd>, <Rm>
12792 (ARM reg to VFP single.)
12793 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12794 (Two ARM regs to two VFP singles.)
12795 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12796 (Two VFP singles to two ARM regs.)
12798 These cases can be disambiguated using neon_select_shape, except cases 1/9
12799 and 3/11 which depend on the operand type too.
12801 All the encoded bits are hardcoded by this function.
12803 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12804 Cases 5, 7 may be used with VFPv2 and above.
12806 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12807 can specify a type where it doesn't make sense to, and is ignored).
12813 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
12814 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
12816 struct neon_type_el et
;
12817 const char *ldconst
= 0;
12821 case NS_DD
: /* case 1/9. */
12822 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12823 /* It is not an error here if no type is given. */
12825 if (et
.type
== NT_float
&& et
.size
== 64)
12827 do_vfp_nsyn_opcode ("fcpyd");
12830 /* fall through. */
12832 case NS_QQ
: /* case 0/1. */
12834 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12836 /* The architecture manual I have doesn't explicitly state which
12837 value the U bit should have for register->register moves, but
12838 the equivalent VORR instruction has U = 0, so do that. */
12839 inst
.instruction
= 0x0200110;
12840 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12841 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12842 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12843 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12844 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12845 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12846 inst
.instruction
|= neon_quad (rs
) << 6;
12848 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12852 case NS_DI
: /* case 3/11. */
12853 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12855 if (et
.type
== NT_float
&& et
.size
== 64)
12857 /* case 11 (fconstd). */
12858 ldconst
= "fconstd";
12859 goto encode_fconstd
;
12861 /* fall through. */
12863 case NS_QI
: /* case 2/3. */
12864 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12866 inst
.instruction
= 0x0800010;
12867 neon_move_immediate ();
12868 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12871 case NS_SR
: /* case 4. */
12873 unsigned bcdebits
= 0;
12874 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12875 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12876 int logsize
= neon_logbits (et
.size
);
12877 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
12878 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
12880 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12882 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12883 && et
.size
!= 32, _(BAD_FPU
));
12884 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12885 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12889 case 8: bcdebits
= 0x8; break;
12890 case 16: bcdebits
= 0x1; break;
12891 case 32: bcdebits
= 0x0; break;
12895 bcdebits
|= x
<< logsize
;
12897 inst
.instruction
= 0xe000b10;
12898 do_vfp_cond_or_thumb ();
12899 inst
.instruction
|= LOW4 (dn
) << 16;
12900 inst
.instruction
|= HI1 (dn
) << 7;
12901 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12902 inst
.instruction
|= (bcdebits
& 3) << 5;
12903 inst
.instruction
|= (bcdebits
>> 2) << 21;
12907 case NS_DRR
: /* case 5 (fmdrr). */
12908 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12911 inst
.instruction
= 0xc400b10;
12912 do_vfp_cond_or_thumb ();
12913 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
12914 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
12915 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12916 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12919 case NS_RS
: /* case 6. */
12921 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12922 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
12923 unsigned logsize
= neon_logbits (et
.size
);
12924 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12925 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
12926 unsigned abcdebits
= 0;
12928 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12930 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12931 && et
.size
!= 32, _(BAD_FPU
));
12932 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12933 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12937 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
12938 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
12939 case 32: abcdebits
= 0x00; break;
12943 abcdebits
|= x
<< logsize
;
12944 inst
.instruction
= 0xe100b10;
12945 do_vfp_cond_or_thumb ();
12946 inst
.instruction
|= LOW4 (dn
) << 16;
12947 inst
.instruction
|= HI1 (dn
) << 7;
12948 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12949 inst
.instruction
|= (abcdebits
& 3) << 5;
12950 inst
.instruction
|= (abcdebits
>> 2) << 21;
12954 case NS_RRD
: /* case 7 (fmrrd). */
12955 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12958 inst
.instruction
= 0xc500b10;
12959 do_vfp_cond_or_thumb ();
12960 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12961 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12962 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12963 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12966 case NS_FF
: /* case 8 (fcpys). */
12967 do_vfp_nsyn_opcode ("fcpys");
12970 case NS_FI
: /* case 10 (fconsts). */
12971 ldconst
= "fconsts";
12973 if (is_quarter_float (inst
.operands
[1].imm
))
12975 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
12976 do_vfp_nsyn_opcode (ldconst
);
12979 first_error (_("immediate out of range"));
12982 case NS_RF
: /* case 12 (fmrs). */
12983 do_vfp_nsyn_opcode ("fmrs");
12986 case NS_FR
: /* case 13 (fmsr). */
12987 do_vfp_nsyn_opcode ("fmsr");
12990 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12991 (one of which is a list), but we have parsed four. Do some fiddling to
12992 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12994 case NS_RRFF
: /* case 14 (fmrrs). */
12995 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
12996 _("VFP registers must be adjacent"));
12997 inst
.operands
[2].imm
= 2;
12998 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
12999 do_vfp_nsyn_opcode ("fmrrs");
13002 case NS_FFRR
: /* case 15 (fmsrr). */
13003 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
13004 _("VFP registers must be adjacent"));
13005 inst
.operands
[1] = inst
.operands
[2];
13006 inst
.operands
[2] = inst
.operands
[3];
13007 inst
.operands
[0].imm
= 2;
13008 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13009 do_vfp_nsyn_opcode ("fmsrr");
13018 do_neon_rshift_round_imm (void)
13020 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13021 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
13022 int imm
= inst
.operands
[2].imm
;
13024 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13027 inst
.operands
[2].present
= 0;
13032 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13033 _("immediate out of range for shift"));
13034 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
13039 do_neon_movl (void)
13041 struct neon_type_el et
= neon_check_type (2, NS_QD
,
13042 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13043 unsigned sizebits
= et
.size
>> 3;
13044 inst
.instruction
|= sizebits
<< 19;
13045 neon_two_same (0, et
.type
== NT_unsigned
, -1);
13051 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13052 struct neon_type_el et
= neon_check_type (2, rs
,
13053 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13054 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13055 neon_two_same (neon_quad (rs
), 1, et
.size
);
13059 do_neon_zip_uzp (void)
13061 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13062 struct neon_type_el et
= neon_check_type (2, rs
,
13063 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13064 if (rs
== NS_DD
&& et
.size
== 32)
13066 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13067 inst
.instruction
= N_MNEM_vtrn
;
13071 neon_two_same (neon_quad (rs
), 1, et
.size
);
13075 do_neon_sat_abs_neg (void)
13077 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13078 struct neon_type_el et
= neon_check_type (2, rs
,
13079 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13080 neon_two_same (neon_quad (rs
), 1, et
.size
);
13084 do_neon_pair_long (void)
13086 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13087 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
13088 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
13089 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
13090 neon_two_same (neon_quad (rs
), 1, et
.size
);
13094 do_neon_recip_est (void)
13096 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13097 struct neon_type_el et
= neon_check_type (2, rs
,
13098 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
13099 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13100 neon_two_same (neon_quad (rs
), 1, et
.size
);
13106 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13107 struct neon_type_el et
= neon_check_type (2, rs
,
13108 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13109 neon_two_same (neon_quad (rs
), 1, et
.size
);
13115 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13116 struct neon_type_el et
= neon_check_type (2, rs
,
13117 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
13118 neon_two_same (neon_quad (rs
), 1, et
.size
);
13124 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13125 struct neon_type_el et
= neon_check_type (2, rs
,
13126 N_EQK
| N_INT
, N_8
| N_KEY
);
13127 neon_two_same (neon_quad (rs
), 1, et
.size
);
13133 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13134 neon_two_same (neon_quad (rs
), 1, -1);
13138 do_neon_tbl_tbx (void)
13140 unsigned listlenbits
;
13141 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
13143 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
13145 first_error (_("bad list length for table lookup"));
13149 listlenbits
= inst
.operands
[1].imm
- 1;
13150 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13151 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13152 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13153 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13154 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13155 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13156 inst
.instruction
|= listlenbits
<< 8;
13158 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13162 do_neon_ldm_stm (void)
13164 /* P, U and L bits are part of bitmask. */
13165 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
13166 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
13168 if (inst
.operands
[1].issingle
)
13170 do_vfp_nsyn_ldm_stm (is_dbmode
);
13174 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
13175 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13177 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
13178 _("register list must contain at least 1 and at most 16 "
13181 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
13182 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
13183 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13184 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
13186 inst
.instruction
|= offsetbits
;
13188 do_vfp_cond_or_thumb ();
13192 do_neon_ldr_str (void)
13194 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
13196 if (inst
.operands
[0].issingle
)
13199 do_vfp_nsyn_opcode ("flds");
13201 do_vfp_nsyn_opcode ("fsts");
13206 do_vfp_nsyn_opcode ("fldd");
13208 do_vfp_nsyn_opcode ("fstd");
13212 /* "interleave" version also handles non-interleaving register VLD1/VST1
13216 do_neon_ld_st_interleave (void)
13218 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
13219 N_8
| N_16
| N_32
| N_64
);
13220 unsigned alignbits
= 0;
13222 /* The bits in this table go:
13223 0: register stride of one (0) or two (1)
13224 1,2: register list length, minus one (1, 2, 3, 4).
13225 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13226 We use -1 for invalid entries. */
13227 const int typetable
[] =
13229 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13230 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13231 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13232 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13236 if (et
.type
== NT_invtype
)
13239 if (inst
.operands
[1].immisalign
)
13240 switch (inst
.operands
[1].imm
>> 8)
13242 case 64: alignbits
= 1; break;
13244 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13245 goto bad_alignment
;
13249 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13250 goto bad_alignment
;
13255 first_error (_("bad alignment"));
13259 inst
.instruction
|= alignbits
<< 4;
13260 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13262 /* Bits [4:6] of the immediate in a list specifier encode register stride
13263 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13264 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13265 up the right value for "type" in a table based on this value and the given
13266 list style, then stick it back. */
13267 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
13268 | (((inst
.instruction
>> 8) & 3) << 3);
13270 typebits
= typetable
[idx
];
13272 constraint (typebits
== -1, _("bad list type for instruction"));
13274 inst
.instruction
&= ~0xf00;
13275 inst
.instruction
|= typebits
<< 8;
13278 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13279 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13280 otherwise. The variable arguments are a list of pairs of legal (size, align)
13281 values, terminated with -1. */
13284 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
13287 int result
= FAIL
, thissize
, thisalign
;
13289 if (!inst
.operands
[1].immisalign
)
13295 va_start (ap
, do_align
);
13299 thissize
= va_arg (ap
, int);
13300 if (thissize
== -1)
13302 thisalign
= va_arg (ap
, int);
13304 if (size
== thissize
&& align
== thisalign
)
13307 while (result
!= SUCCESS
);
13311 if (result
== SUCCESS
)
13314 first_error (_("unsupported alignment for instruction"));
13320 do_neon_ld_st_lane (void)
13322 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13323 int align_good
, do_align
= 0;
13324 int logsize
= neon_logbits (et
.size
);
13325 int align
= inst
.operands
[1].imm
>> 8;
13326 int n
= (inst
.instruction
>> 8) & 3;
13327 int max_el
= 64 / et
.size
;
13329 if (et
.type
== NT_invtype
)
13332 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
13333 _("bad list length"));
13334 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
13335 _("scalar index out of range"));
13336 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
13338 _("stride of 2 unavailable when element size is 8"));
13342 case 0: /* VLD1 / VST1. */
13343 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
13345 if (align_good
== FAIL
)
13349 unsigned alignbits
= 0;
13352 case 16: alignbits
= 0x1; break;
13353 case 32: alignbits
= 0x3; break;
13356 inst
.instruction
|= alignbits
<< 4;
13360 case 1: /* VLD2 / VST2. */
13361 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
13363 if (align_good
== FAIL
)
13366 inst
.instruction
|= 1 << 4;
13369 case 2: /* VLD3 / VST3. */
13370 constraint (inst
.operands
[1].immisalign
,
13371 _("can't use alignment with this instruction"));
13374 case 3: /* VLD4 / VST4. */
13375 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13376 16, 64, 32, 64, 32, 128, -1);
13377 if (align_good
== FAIL
)
13381 unsigned alignbits
= 0;
13384 case 8: alignbits
= 0x1; break;
13385 case 16: alignbits
= 0x1; break;
13386 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
13389 inst
.instruction
|= alignbits
<< 4;
13396 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13397 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13398 inst
.instruction
|= 1 << (4 + logsize
);
13400 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
13401 inst
.instruction
|= logsize
<< 10;
13404 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13407 do_neon_ld_dup (void)
13409 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13410 int align_good
, do_align
= 0;
13412 if (et
.type
== NT_invtype
)
13415 switch ((inst
.instruction
>> 8) & 3)
13417 case 0: /* VLD1. */
13418 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
13419 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13420 &do_align
, 16, 16, 32, 32, -1);
13421 if (align_good
== FAIL
)
13423 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
13426 case 2: inst
.instruction
|= 1 << 5; break;
13427 default: first_error (_("bad list length")); return;
13429 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13432 case 1: /* VLD2. */
13433 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13434 &do_align
, 8, 16, 16, 32, 32, 64, -1);
13435 if (align_good
== FAIL
)
13437 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
13438 _("bad list length"));
13439 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13440 inst
.instruction
|= 1 << 5;
13441 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13444 case 2: /* VLD3. */
13445 constraint (inst
.operands
[1].immisalign
,
13446 _("can't use alignment with this instruction"));
13447 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
13448 _("bad list length"));
13449 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13450 inst
.instruction
|= 1 << 5;
13451 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13454 case 3: /* VLD4. */
13456 int align
= inst
.operands
[1].imm
>> 8;
13457 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13458 16, 64, 32, 64, 32, 128, -1);
13459 if (align_good
== FAIL
)
13461 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
13462 _("bad list length"));
13463 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13464 inst
.instruction
|= 1 << 5;
13465 if (et
.size
== 32 && align
== 128)
13466 inst
.instruction
|= 0x3 << 6;
13468 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13475 inst
.instruction
|= do_align
<< 4;
13478 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13479 apart from bits [11:4]. */
13482 do_neon_ldx_stx (void)
13484 switch (NEON_LANE (inst
.operands
[0].imm
))
13486 case NEON_INTERLEAVE_LANES
:
13487 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
13488 do_neon_ld_st_interleave ();
13491 case NEON_ALL_LANES
:
13492 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
13497 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
13498 do_neon_ld_st_lane ();
13501 /* L bit comes from bit mask. */
13502 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13503 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13504 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13506 if (inst
.operands
[1].postind
)
13508 int postreg
= inst
.operands
[1].imm
& 0xf;
13509 constraint (!inst
.operands
[1].immisreg
,
13510 _("post-index must be a register"));
13511 constraint (postreg
== 0xd || postreg
== 0xf,
13512 _("bad register for post-index"));
13513 inst
.instruction
|= postreg
;
13515 else if (inst
.operands
[1].writeback
)
13517 inst
.instruction
|= 0xd;
13520 inst
.instruction
|= 0xf;
13523 inst
.instruction
|= 0xf9000000;
13525 inst
.instruction
|= 0xf4000000;
13529 /* Overall per-instruction processing. */
13531 /* We need to be able to fix up arbitrary expressions in some statements.
13532 This is so that we can handle symbols that are an arbitrary distance from
13533 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13534 which returns part of an address in a form which will be valid for
13535 a data instruction. We do this by pushing the expression into a symbol
13536 in the expr_section, and creating a fix for that. */
13539 fix_new_arm (fragS
* frag
,
13554 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
13558 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
13563 /* Mark whether the fix is to a THUMB instruction, or an ARM
13565 new_fix
->tc_fix_data
= thumb_mode
;
13568 /* Create a frg for an instruction requiring relaxation. */
13570 output_relax_insn (void)
13576 /* The size of the instruction is unknown, so tie the debug info to the
13577 start of the instruction. */
13578 dwarf2_emit_insn (0);
13580 switch (inst
.reloc
.exp
.X_op
)
13583 sym
= inst
.reloc
.exp
.X_add_symbol
;
13584 offset
= inst
.reloc
.exp
.X_add_number
;
13588 offset
= inst
.reloc
.exp
.X_add_number
;
13591 sym
= make_expr_symbol (&inst
.reloc
.exp
);
13595 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
13596 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
13597 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
13600 /* Write a 32-bit thumb instruction to buf. */
13602 put_thumb32_insn (char * buf
, unsigned long insn
)
13604 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
13605 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
13609 output_inst (const char * str
)
13615 as_bad ("%s -- `%s'", inst
.error
, str
);
13619 output_relax_insn();
13622 if (inst
.size
== 0)
13625 to
= frag_more (inst
.size
);
13627 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
13629 assert (inst
.size
== (2 * THUMB_SIZE
));
13630 put_thumb32_insn (to
, inst
.instruction
);
13632 else if (inst
.size
> INSN_SIZE
)
13634 assert (inst
.size
== (2 * INSN_SIZE
));
13635 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
13636 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
13639 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
13641 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
13642 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
13643 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
13646 dwarf2_emit_insn (inst
.size
);
/* Tag values used in struct asm_opcode's tag field.  They classify how a
   mnemonic accepts (or rejects) a conditional affix.  */
enum opcode_tag
{
  OT_unconditional,     /* Instruction cannot be conditionalized.
                           The ARM condition field is still 0xE.  */
  OT_unconditionalF,    /* Instruction cannot be conditionalized
                           and carries 0xF in its ARM condition field.  */
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
  OT_csuffixF,          /* Some forms of the instruction take a conditional
                           suffix, others place 0xF where the condition field
                           would be.  */
  OT_cinfix3,           /* Instruction takes a conditional infix,
                           beginning at character index 3.  (In
                           unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
                           character index 3, even in unified mode.  Used for
                           legacy instructions where suffix and infix forms
                           may be ambiguous.  */
  OT_csuf_or_in3,       /* Instruction takes either a conditional
                           suffix or an infix at character index 3.  */
  OT_odd_infix_unc,     /* This is the unconditional variant of an
                           instruction that takes a conditional infix
                           at an unusual position.  In unified mode,
                           this variant will accept a suffix.  */
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
                           are the conditional variants of instructions that
                           take conditional infixes in unusual positions.
                           The infix appears at character index
                           (tag - OT_odd_infix_0).  These are not accepted
                           in unified mode.  */
};
13683 /* Subroutine of md_assemble, responsible for looking up the primary
13684 opcode from the mnemonic the user wrote. STR points to the
13685 beginning of the mnemonic.
13687 This is not simply a hash table lookup, because of conditional
13688 variants. Most instructions have conditional variants, which are
13689 expressed with a _conditional affix_ to the mnemonic. If we were
13690 to encode each conditional variant as a literal string in the opcode
13691 table, it would have approximately 20,000 entries.
13693 Most mnemonics take this affix as a suffix, and in unified syntax,
13694 'most' is upgraded to 'all'. However, in the divided syntax, some
13695 instructions take the affix as an infix, notably the s-variants of
13696 the arithmetic instructions. Of those instructions, all but six
13697 have the infix appear after the third character of the mnemonic.
13699 Accordingly, the algorithm for looking up primary opcodes given
13702 1. Look up the identifier in the opcode table.
13703 If we find a match, go to step U.
13705 2. Look up the last two characters of the identifier in the
13706 conditions table. If we find a match, look up the first N-2
13707 characters of the identifier in the opcode table. If we
13708 find a match, go to step CE.
13710 3. Look up the fourth and fifth characters of the identifier in
13711 the conditions table. If we find a match, extract those
13712 characters from the identifier, and look up the remaining
13713 characters in the opcode table. If we find a match, go
13718 U. Examine the tag field of the opcode structure, in case this is
13719 one of the six instructions with its conditional infix in an
13720 unusual place. If it is, the tag tells us where to find the
13721 infix; look it up in the conditions table and set inst.cond
13722 accordingly. Otherwise, this is an unconditional instruction.
13723 Again set inst.cond accordingly. Return the opcode structure.
13725 CE. Examine the tag field to make sure this is an instruction that
13726 should receive a conditional suffix. If it is not, fail.
13727 Otherwise, set inst.cond from the suffix we already looked up,
13728 and return the opcode structure.
13730 CM. Examine the tag field to make sure this is an instruction that
13731 should receive a conditional infix after the third character.
13732 If it is not, fail. Otherwise, undo the edits to the current
13733 line of input and proceed as for case CE. */
13735 static const struct asm_opcode
*
13736 opcode_lookup (char **str
)
13740 const struct asm_opcode
*opcode
;
13741 const struct asm_cond
*cond
;
13743 bfd_boolean neon_supported
;
13745 neon_supported
= ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
);
13747 /* Scan up to the end of the mnemonic, which must end in white space,
13748 '.' (in unified mode, or for Neon instructions), or end of string. */
13749 for (base
= end
= *str
; *end
!= '\0'; end
++)
13750 if (*end
== ' ' || ((unified_syntax
|| neon_supported
) && *end
== '.'))
13756 /* Handle a possible width suffix and/or Neon type suffix. */
13761 /* The .w and .n suffixes are only valid if the unified syntax is in
13763 if (unified_syntax
&& end
[1] == 'w')
13765 else if (unified_syntax
&& end
[1] == 'n')
13770 inst
.vectype
.elems
= 0;
13772 *str
= end
+ offset
;
13774 if (end
[offset
] == '.')
13776 /* See if we have a Neon type suffix (possible in either unified or
13777 non-unified ARM syntax mode). */
13778 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
13781 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
13787 /* Look for unaffixed or special-case affixed mnemonic. */
13788 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
13792 if (opcode
->tag
< OT_odd_infix_0
)
13794 inst
.cond
= COND_ALWAYS
;
13798 if (unified_syntax
)
13799 as_warn (_("conditional infixes are deprecated in unified syntax"));
13800 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
13801 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13804 inst
.cond
= cond
->value
;
13808 /* Cannot have a conditional suffix on a mnemonic of less than two
13810 if (end
- base
< 3)
13813 /* Look for suffixed mnemonic. */
13815 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13816 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
13817 if (opcode
&& cond
)
13820 switch (opcode
->tag
)
13822 case OT_cinfix3_legacy
:
13823 /* Ignore conditional suffixes matched on infix only mnemonics. */
13827 case OT_cinfix3_deprecated
:
13828 case OT_odd_infix_unc
:
13829 if (!unified_syntax
)
13831 /* else fall through */
13835 case OT_csuf_or_in3
:
13836 inst
.cond
= cond
->value
;
13839 case OT_unconditional
:
13840 case OT_unconditionalF
:
13843 inst
.cond
= cond
->value
;
13847 /* delayed diagnostic */
13848 inst
.error
= BAD_COND
;
13849 inst
.cond
= COND_ALWAYS
;
13858 /* Cannot have a usual-position infix on a mnemonic of less than
13859 six characters (five would be a suffix). */
13860 if (end
- base
< 6)
13863 /* Look for infixed mnemonic in the usual position. */
13865 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13869 memcpy (save
, affix
, 2);
13870 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
13871 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
13872 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
13873 memcpy (affix
, save
, 2);
13876 && (opcode
->tag
== OT_cinfix3
13877 || opcode
->tag
== OT_cinfix3_deprecated
13878 || opcode
->tag
== OT_csuf_or_in3
13879 || opcode
->tag
== OT_cinfix3_legacy
))
13883 && (opcode
->tag
== OT_cinfix3
13884 || opcode
->tag
== OT_cinfix3_deprecated
))
13885 as_warn (_("conditional infixes are deprecated in unified syntax"));
13887 inst
.cond
= cond
->value
;
13895 md_assemble (char *str
)
13898 const struct asm_opcode
* opcode
;
13900 /* Align the previous label if needed. */
13901 if (last_label_seen
!= NULL
)
13903 symbol_set_frag (last_label_seen
, frag_now
);
13904 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
13905 S_SET_SEGMENT (last_label_seen
, now_seg
);
13908 memset (&inst
, '\0', sizeof (inst
));
13909 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
13911 opcode
= opcode_lookup (&p
);
13914 /* It wasn't an instruction, but it might be a register alias of
13915 the form alias .req reg, or a Neon .dn/.qn directive. */
13916 if (!create_register_alias (str
, p
)
13917 && !create_neon_reg_alias (str
, p
))
13918 as_bad (_("bad instruction `%s'"), str
);
13923 if (opcode
->tag
== OT_cinfix3_deprecated
)
13924 as_warn (_("s suffix on comparison instruction is deprecated"));
13926 /* The value which unconditional instructions should have in place of the
13927 condition field. */
13928 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
13932 arm_feature_set variant
;
13934 variant
= cpu_variant
;
13935 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13936 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
13937 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
13938 /* Check that this instruction is supported for this CPU. */
13939 if (!opcode
->tvariant
13940 || (thumb_mode
== 1
13941 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
13943 as_bad (_("selected processor does not support `%s'"), str
);
13946 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
13947 && opcode
->tencode
!= do_t_branch
)
13949 as_bad (_("Thumb does not support conditional execution"));
13953 /* Check conditional suffixes. */
13954 if (current_it_mask
)
13957 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
13958 current_it_mask
<<= 1;
13959 current_it_mask
&= 0x1f;
13960 /* The BKPT instruction is unconditional even in an IT block. */
13962 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
13964 as_bad (_("incorrect condition in IT block"));
13968 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
13970 as_bad (_("thumb conditional instrunction not in IT block"));
13974 mapping_state (MAP_THUMB
);
13975 inst
.instruction
= opcode
->tvalue
;
13977 if (!parse_operands (p
, opcode
->operands
))
13978 opcode
->tencode ();
13980 /* Clear current_it_mask at the end of an IT block. */
13981 if (current_it_mask
== 0x10)
13982 current_it_mask
= 0;
13984 if (!(inst
.error
|| inst
.relax
))
13986 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
13987 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
13988 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
13990 as_bad (_("cannot honor width suffix -- `%s'"), str
);
13994 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
13995 *opcode
->tvariant
);
13996 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13997 set those bits when Thumb-2 32-bit instructions are seen. ie.
13998 anything other than bl/blx.
13999 This is overly pessimistic for relaxable instructions. */
14000 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
14002 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
14005 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
14007 /* Check that this instruction is supported for this CPU. */
14008 if (!opcode
->avariant
||
14009 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
14011 as_bad (_("selected processor does not support `%s'"), str
);
14016 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
14020 mapping_state (MAP_ARM
);
14021 inst
.instruction
= opcode
->avalue
;
14022 if (opcode
->tag
== OT_unconditionalF
)
14023 inst
.instruction
|= 0xF << 28;
14025 inst
.instruction
|= inst
.cond
<< 28;
14026 inst
.size
= INSN_SIZE
;
14027 if (!parse_operands (p
, opcode
->operands
))
14028 opcode
->aencode ();
14029 /* Arm mode bx is marked as both v4T and v5 because it's still required
14030 on a hypothetical non-thumb v5 core. */
14031 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
14032 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
14033 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
14035 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
14036 *opcode
->avariant
);
14040 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14047 /* Various frobbings of labels and their addresses. */
14050 arm_start_line_hook (void)
14052 last_label_seen
= NULL
;
14056 arm_frob_label (symbolS
* sym
)
14058 last_label_seen
= sym
;
14060 ARM_SET_THUMB (sym
, thumb_mode
);
14062 #if defined OBJ_COFF || defined OBJ_ELF
14063 ARM_SET_INTERWORK (sym
, support_interwork
);
14066 /* Note - do not allow local symbols (.Lxxx) to be labeled
14067 as Thumb functions. This is because these labels, whilst
14068 they exist inside Thumb code, are not the entry points for
14069 possible ARM->Thumb calls. Also, these labels can be used
14070 as part of a computed goto or switch statement. eg gcc
14071 can generate code that looks like this:
14073 ldr r2, [pc, .Laaa]
14083 The first instruction loads the address of the jump table.
14084 The second instruction converts a table index into a byte offset.
14085 The third instruction gets the jump address out of the table.
14086 The fourth instruction performs the jump.
14088 If the address stored at .Laaa is that of a symbol which has the
14089 Thumb_Func bit set, then the linker will arrange for this address
14090 to have the bottom bit set, which in turn would mean that the
14091 address computation performed by the third instruction would end
14092 up with the bottom bit set. Since the ARM is capable of unaligned
14093 word loads, the instruction would then load the incorrect address
14094 out of the jump table, and chaos would ensue. */
14095 if (label_is_thumb_function_name
14096 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
14097 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
14099 /* When the address of a Thumb function is taken the bottom
14100 bit of that address should be set. This will allow
14101 interworking between Arm and Thumb functions to work
14104 THUMB_SET_FUNC (sym
, 1);
14106 label_is_thumb_function_name
= FALSE
;
14109 dwarf2_emit_label (sym
);
14113 arm_data_in_code (void)
14115 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
14117 *input_line_pointer
= '/';
14118 input_line_pointer
+= 5;
14119 *input_line_pointer
= 0;
14127 arm_canonicalize_symbol_name (char * name
)
14131 if (thumb_mode
&& (len
= strlen (name
)) > 5
14132 && streq (name
+ len
- 5, "/data"))
14133 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Helper macros for building reg_names[] entries.  REGDEF makes one
   entry, REGNUM pastes the number onto the prefix, REGNUM2 is for
   Q registers (each maps to a pair of D registers), and the REGSET
   macros expand to runs of 16 consecutive registers.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14162 static const struct reg_entry reg_names
[] =
14164 /* ARM integer registers. */
14165 REGSET(r
, RN
), REGSET(R
, RN
),
14167 /* ATPCS synonyms. */
14168 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
14169 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
14170 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
14172 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
14173 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
14174 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
14176 /* Well-known aliases. */
14177 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
14178 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
14180 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
14181 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
14183 /* Coprocessor numbers. */
14184 REGSET(p
, CP
), REGSET(P
, CP
),
14186 /* Coprocessor register numbers. The "cr" variants are for backward
14188 REGSET(c
, CN
), REGSET(C
, CN
),
14189 REGSET(cr
, CN
), REGSET(CR
, CN
),
14191 /* FPA registers. */
14192 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
14193 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
14195 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
14196 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
14198 /* VFP SP registers. */
14199 REGSET(s
,VFS
), REGSET(S
,VFS
),
14200 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
14202 /* VFP DP Registers. */
14203 REGSET(d
,VFD
), REGSET(D
,VFD
),
14204 /* Extra Neon DP registers. */
14205 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
14207 /* Neon QP registers. */
14208 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
14210 /* VFP control registers. */
14211 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
14212 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
14214 /* Maverick DSP coprocessor registers. */
14215 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
14216 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
14218 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
14219 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
14220 REGDEF(dspsc
,0,DSPSC
),
14222 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
14223 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
14224 REGDEF(DSPSC
,0,DSPSC
),
14226 /* iWMMXt data registers - p0, c0-15. */
14227 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
14229 /* iWMMXt control registers - p1, c0-3. */
14230 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
14231 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
14232 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
14233 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
14235 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14236 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
14237 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
14238 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
14239 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
14241 /* XScale accumulator registers. */
14242 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
14248 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14249 within psr_required_here. */
14250 static const struct asm_psr psrs
[] =
14252 /* Backward compatibility notation. Note that "all" is no longer
14253 truly all possible PSR bits. */
14254 {"all", PSR_c
| PSR_f
},
14258 /* Individual flags. */
14263 /* Combinations of flags. */
14264 {"fs", PSR_f
| PSR_s
},
14265 {"fx", PSR_f
| PSR_x
},
14266 {"fc", PSR_f
| PSR_c
},
14267 {"sf", PSR_s
| PSR_f
},
14268 {"sx", PSR_s
| PSR_x
},
14269 {"sc", PSR_s
| PSR_c
},
14270 {"xf", PSR_x
| PSR_f
},
14271 {"xs", PSR_x
| PSR_s
},
14272 {"xc", PSR_x
| PSR_c
},
14273 {"cf", PSR_c
| PSR_f
},
14274 {"cs", PSR_c
| PSR_s
},
14275 {"cx", PSR_c
| PSR_x
},
14276 {"fsx", PSR_f
| PSR_s
| PSR_x
},
14277 {"fsc", PSR_f
| PSR_s
| PSR_c
},
14278 {"fxs", PSR_f
| PSR_x
| PSR_s
},
14279 {"fxc", PSR_f
| PSR_x
| PSR_c
},
14280 {"fcs", PSR_f
| PSR_c
| PSR_s
},
14281 {"fcx", PSR_f
| PSR_c
| PSR_x
},
14282 {"sfx", PSR_s
| PSR_f
| PSR_x
},
14283 {"sfc", PSR_s
| PSR_f
| PSR_c
},
14284 {"sxf", PSR_s
| PSR_x
| PSR_f
},
14285 {"sxc", PSR_s
| PSR_x
| PSR_c
},
14286 {"scf", PSR_s
| PSR_c
| PSR_f
},
14287 {"scx", PSR_s
| PSR_c
| PSR_x
},
14288 {"xfs", PSR_x
| PSR_f
| PSR_s
},
14289 {"xfc", PSR_x
| PSR_f
| PSR_c
},
14290 {"xsf", PSR_x
| PSR_s
| PSR_f
},
14291 {"xsc", PSR_x
| PSR_s
| PSR_c
},
14292 {"xcf", PSR_x
| PSR_c
| PSR_f
},
14293 {"xcs", PSR_x
| PSR_c
| PSR_s
},
14294 {"cfs", PSR_c
| PSR_f
| PSR_s
},
14295 {"cfx", PSR_c
| PSR_f
| PSR_x
},
14296 {"csf", PSR_c
| PSR_s
| PSR_f
},
14297 {"csx", PSR_c
| PSR_s
| PSR_x
},
14298 {"cxf", PSR_c
| PSR_x
| PSR_f
},
14299 {"cxs", PSR_c
| PSR_x
| PSR_s
},
14300 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
14301 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
14302 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
14303 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
14304 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
14305 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
14306 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
14307 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
14308 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
14309 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
14310 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
14311 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
14312 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
14313 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
14314 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
14315 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
14316 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
14317 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
14318 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
14319 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
14320 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
14321 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
14322 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
14323 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
14326 /* Table of V7M psr names. */
14327 static const struct asm_psr v7m_psrs
[] =
14340 {"basepri_max", 18},
14345 /* Table of all shift-in-operand names. */
14346 static const struct asm_shift_name shift_names
[] =
14348 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
14349 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
14350 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
14351 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
14352 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
14353 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
/* Table of all explicit relocation names.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
14374 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14375 static const struct asm_cond conds
[] =
14379 {"cs", 0x2}, {"hs", 0x2},
14380 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14394 static struct asm_barrier_opt barrier_opt_names
[] =
14402 /* Table of ARM-format instructions. */
14404 /* Macros for gluing together operand strings. N.B. In all cases
14405 other than OPS0, the trailing OP_stop comes from default
14406 zero-initialization of the unspecified elements of the array. */
14407 #define OPS0() { OP_stop, }
14408 #define OPS1(a) { OP_##a, }
14409 #define OPS2(a,b) { OP_##a,OP_##b, }
14410 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14411 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14412 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14413 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14415 /* These macros abstract out the exact format of the mnemonic table and
14416 save some repeated characters. */
14418 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14419 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14420 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14421 THUMB_VARIANT, do_##ae, do_##te }
14423 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14424 a T_MNEM_xyz enumerator. */
14425 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14426 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14427 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14428 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14430 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14431 infix after the third character. */
14432 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14433 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14434 THUMB_VARIANT, do_##ae, do_##te }
14435 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14436 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14437 THUMB_VARIANT, do_##ae, do_##te }
14438 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14439 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14440 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14441 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14442 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14443 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14444 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14445 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
/* Mnemonic with a conditional infix in an unusual place.  Each and
   every variant has to appear in the condition table.  Note that an
   empty second argument stringizes to "", whose sizeof is 1 -- this is
   how the bare (unconditional) spelling is detected.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand to one table entry for the bare spelling plus one per ARM
   condition code infix.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

/* As TC3/tC3, but for the odd-infix conditional family above.  */
#define TCM(m1, m2, aop, top, nops, ops, ae, te) \
  TxCM (m1, m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1, m2, aop, top, nops, ops, ae, te) \
  TxCM (m1, m2, aop, T_MNEM_##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, \
    ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, \
    ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
/* ARM-only variants of all the above: the Thumb opcode field is zero
   and the Thumb encoder slot is NULL.  */
#define CE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, \
    ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, \
    ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have a conditional infix after the
   third character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, 0x##op, 0x0, \
    ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb opcode is the ARM one with the always-condition (0xe) pasted
   in front.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, \
    ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and
   conditional suffix are ambiguous.  For consistency this includes
   all FPA instructions, not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, 0x##op, 0xe##op, \
    ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, 0x##op, 0xe##op, \
    ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* ARM-only odd-infix mnemonics: one table entry per spelling.  As with
   TxCM_, an empty infix argument stringizes to "" (sizeof == 1) and
   marks the unconditional spelling.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the bare spelling plus one entry per condition code.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional (condition field 0xE).  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, \
    ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only, unconditional with 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, \
    ARM_VARIANT, 0, do_##ae, NULL }
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28),
   so we use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   Neon type.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded
   types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
/* Thumb-only, unconditional.  Reuses TUE with a zero ARM opcode and a
   zero ARM encoder slot.  */
#define UT(mnem, op, nops, ops, te) \
  TUE (mnem, 0, op, nops, ops, 0, te)
14595 static const struct asm_opcode insns
[] =
14597 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14598 #define THUMB_VARIANT &arm_ext_v4t
14599 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14600 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14601 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14602 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14603 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14604 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14605 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14606 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14607 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14608 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14609 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14610 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14611 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14612 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14613 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14614 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14616 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14617 for setting PSR flag bits. They are obsolete in V6 and do not
14618 have Thumb equivalents. */
14619 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14620 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14621 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
14622 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14623 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14624 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
14625 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14626 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14627 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
14629 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14630 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14631 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14632 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14634 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14635 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14636 tCE(str
, 4000000, str
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14637 tC3(strb
, 4400000, strb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14639 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14640 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14641 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14642 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14643 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14644 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14646 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14647 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14648 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
14649 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
14652 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
14653 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
14654 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
14656 /* Thumb-compatibility pseudo ops. */
14657 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14658 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14659 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14660 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14661 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14662 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14663 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14664 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14665 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
14666 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
14667 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
14668 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
14670 #undef THUMB_VARIANT
14671 #define THUMB_VARIANT &arm_ext_v6
14672 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
14674 /* V1 instructions with no Thumb analogue prior to V6T2. */
14675 #undef THUMB_VARIANT
14676 #define THUMB_VARIANT &arm_ext_v6t2
14677 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14678 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14679 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14680 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14681 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
14683 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14684 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14685 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14686 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14688 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14689 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14691 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14692 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14694 /* V1 instructions with no Thumb analogue at all. */
14695 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
14696 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
14698 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14699 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14700 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14701 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14702 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14703 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14704 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14705 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14708 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14709 #undef THUMB_VARIANT
14710 #define THUMB_VARIANT &arm_ext_v4t
14711 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14712 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14714 #undef THUMB_VARIANT
14715 #define THUMB_VARIANT &arm_ext_v6t2
14716 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14717 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
14719 /* Generic coprocessor instructions. */
14720 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14721 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14722 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14723 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14724 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14725 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14726 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14729 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14730 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14731 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14734 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14735 TCE(mrs
, 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
14736 TCE(msr
, 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
14739 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14740 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14741 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14742 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14743 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14744 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14745 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14746 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14747 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14750 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14751 #undef THUMB_VARIANT
14752 #define THUMB_VARIANT &arm_ext_v4t
14753 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14754 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14755 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14756 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14757 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14758 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14761 #define ARM_VARIANT &arm_ext_v4t_5
14762 /* ARM Architecture 4T. */
14763 /* Note: bx (and blx) are required on V5, even if the processor does
14764 not support Thumb. */
14765 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
14768 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14769 #undef THUMB_VARIANT
14770 #define THUMB_VARIANT &arm_ext_v5t
14771 /* Note: blx has 2 variants; the .value coded here is for
14772 BLX(2). Only this variant has conditional execution. */
14773 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
14774 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
14776 #undef THUMB_VARIANT
14777 #define THUMB_VARIANT &arm_ext_v6t2
14778 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
14779 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14780 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14781 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14782 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14783 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14784 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14785 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14788 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14789 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14790 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14791 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14792 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14794 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14795 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14797 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14798 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14799 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14800 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14802 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14803 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14804 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14805 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14807 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14808 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14810 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14811 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14812 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14813 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14816 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14817 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
14818 TC3(ldrd
, 00000d0
, e9500000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
14819 TC3(strd
, 00000f0
, e9400000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
14821 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14822 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14825 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14826 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
14829 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14830 #undef THUMB_VARIANT
14831 #define THUMB_VARIANT &arm_ext_v6
14832 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14833 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14834 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14835 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14836 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14837 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14838 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14839 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14840 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14841 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
14843 #undef THUMB_VARIANT
14844 #define THUMB_VARIANT &arm_ext_v6t2
14845 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
14846 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14847 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14849 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
14850 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
14852 /* ARM V6 not included in V7M (eg. integer SIMD). */
14853 #undef THUMB_VARIANT
14854 #define THUMB_VARIANT &arm_ext_v6_notm
14855 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
14856 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
14857 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
14858 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14859 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14860 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14861 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14862 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14863 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14864 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14865 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14866 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14867 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14868 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14869 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14870 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14871 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14872 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14873 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14874 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14875 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14876 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14877 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14878 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14879 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14880 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14881 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14882 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14883 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14884 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14885 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14886 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14887 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14888 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14889 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14890 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14891 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14892 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14893 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14894 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14895 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
14896 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
14897 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14898 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14899 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
14900 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
14901 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14902 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14903 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14904 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14905 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14906 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14907 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14908 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14909 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14910 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14911 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14912 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14913 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14914 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14915 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14916 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14917 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14918 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14919 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14920 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14921 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14922 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14923 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14924 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14925 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14926 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14927 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14928 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14929 TUF(srsia
, 8cd0500
, e980c000
, 1, (I31w
), srs
, srs
),
14930 UF(srsib
, 9cd0500
, 1, (I31w
), srs
),
14931 UF(srsda
, 84d0500
, 1, (I31w
), srs
),
14932 TUF(srsdb
, 94d0500
, e800c000
, 1, (I31w
), srs
, srs
),
14933 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
14934 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
14935 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
14936 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14937 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14938 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
14941 #define ARM_VARIANT &arm_ext_v6k
14942 #undef THUMB_VARIANT
14943 #define THUMB_VARIANT &arm_ext_v6k
14944 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
14945 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
14946 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
14947 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
14949 #undef THUMB_VARIANT
14950 #define THUMB_VARIANT &arm_ext_v6_notm
14951 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
14952 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
14954 #undef THUMB_VARIANT
14955 #define THUMB_VARIANT &arm_ext_v6t2
14956 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14957 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14958 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14959 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14960 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
14963 #define ARM_VARIANT &arm_ext_v6z
14964 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
14967 #define ARM_VARIANT &arm_ext_v6t2
14968 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
14969 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
14970 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14971 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14973 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14974 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14975 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14976 TCE(rbit
, 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
14978 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14979 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14980 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14981 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14983 UT(cbnz
, b900
, 2, (RR
, EXP
), t_cbz
),
14984 UT(cbz
, b100
, 2, (RR
, EXP
), t_cbz
),
14985 /* ARM does not really have an IT instruction, so always allow it. */
14987 #define ARM_VARIANT &arm_ext_v1
14988 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
14989 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
14990 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
14991 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
14992 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
14993 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
14994 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
14995 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
14996 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
14997 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
14998 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
14999 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
15000 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
15001 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
15002 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
15004 /* Thumb2 only instructions. */
15006 #define ARM_VARIANT NULL
15008 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15009 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15010 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
15011 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
15013 /* Thumb-2 hardware division instructions (R and M profiles only). */
15014 #undef THUMB_VARIANT
15015 #define THUMB_VARIANT &arm_ext_div
15016 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15017 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15019 /* ARM V7 instructions. */
15021 #define ARM_VARIANT &arm_ext_v7
15022 #undef THUMB_VARIANT
15023 #define THUMB_VARIANT &arm_ext_v7
15024 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
15025 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
15026 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
15027 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
15028 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
15031 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15032 cCE(wfs
, e200110
, 1, (RR
), rd
),
15033 cCE(rfs
, e300110
, 1, (RR
), rd
),
15034 cCE(wfc
, e400110
, 1, (RR
), rd
),
15035 cCE(rfc
, e500110
, 1, (RR
), rd
),
15037 cCL(ldfs
, c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15038 cCL(ldfd
, c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15039 cCL(ldfe
, c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15040 cCL(ldfp
, c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15042 cCL(stfs
, c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15043 cCL(stfd
, c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15044 cCL(stfe
, c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15045 cCL(stfp
, c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15047 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
15048 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
15049 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
15050 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
15051 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
15052 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
15053 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
15054 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
15055 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
15056 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
15057 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
15058 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
15060 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
15061 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
15062 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
15063 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
15064 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
15065 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
15066 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
15067 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
15068 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
15069 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
15070 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
15071 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
15073 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
15074 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
15075 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
15076 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
15077 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
15078 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
15079 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
15080 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
15081 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
15082 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
15083 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
15084 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
15086 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
15087 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
15088 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
15089 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
15090 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
15091 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
15092 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
15093 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
15094 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
15095 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
15096 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
15097 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
15099 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
15100 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
15101 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
15102 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
15103 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
15104 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
15105 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
15106 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
15107 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
15108 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
15109 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
15110 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
15112 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
15113 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
15114 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
15115 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
15116 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
15117 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
15118 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
15119 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
15120 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
15121 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
15122 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
15123 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
15125 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
15126 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
15127 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
15128 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
15129 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
15130 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
15131 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
15132 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
15133 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
15134 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
15135 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
15136 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
15138 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
15139 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
15140 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
15141 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
15142 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
15143 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
15144 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
15145 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
15146 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
15147 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
15148 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
15149 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
15151 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
15152 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
15153 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
15154 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
15155 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
15156 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
15157 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
15158 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
15159 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
15160 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
15161 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
15162 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
15164 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
15165 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
15166 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
15167 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
15168 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
15169 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
15170 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
15171 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
15172 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
15173 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
15174 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
15175 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
15177 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
15178 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
15179 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
15180 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
15181 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
15182 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
15183 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
15184 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
15185 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
15186 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
15187 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
15188 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
15190 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
15191 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
15192 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
15193 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
15194 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
15195 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
15196 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
15197 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
15198 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
15199 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
15200 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
15201 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
15203 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
15204 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
15205 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
15206 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
15207 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
15208 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
15209 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
15210 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
15211 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
15212 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
15213 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
15214 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
15216 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
15217 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
15218 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
15219 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
15220 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
15221 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
15222 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
15223 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
15224 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
15225 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
15226 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
15227 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
15229 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
15230 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
15231 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
15232 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
15233 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
15234 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
15235 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
15236 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
15237 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
15238 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
15239 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
15240 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
15242 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
15243 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
15244 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
15245 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
15246 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
15247 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
15248 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
15249 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
15250 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
15251 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
15252 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
15253 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
15255 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15256 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15257 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15258 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15259 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15260 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15261 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15262 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15263 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15264 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15265 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15266 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15268 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15269 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15270 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15271 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15272 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15273 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15274 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15275 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15276 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15277 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15278 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15279 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15281 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15282 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15283 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15284 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15285 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15286 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15287 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15288 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15289 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15290 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15291 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15292 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15294 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15295 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15296 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15297 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15298 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15299 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15300 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15301 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15302 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15303 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15304 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15305 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15307 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15308 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15309 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15310 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15311 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15312 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15313 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15314 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15315 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15316 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15317 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15318 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15320 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15321 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15322 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15323 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15324 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15325 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15326 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15327 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15328 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15329 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15330 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15331 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15333 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15334 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15335 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15336 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15337 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15338 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15339 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15340 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15341 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15342 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15343 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15344 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15346 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15347 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15348 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15349 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15350 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15351 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15352 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15353 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15354 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15355 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15356 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15357 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15359 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15360 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15361 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15362 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15363 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15364 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15365 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15366 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15367 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15368 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15369 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15370 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15372 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15373 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15374 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15375 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15376 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15377 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15378 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15379 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15380 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15381 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15382 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15383 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15385 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15386 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15387 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15388 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15389 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15390 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15391 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15392 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15393 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15394 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15395 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15396 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15398 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15399 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15400 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15401 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15402 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15403 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15404 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15405 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15406 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15407 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15408 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15409 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15411 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15412 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15413 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15414 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15415 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15416 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15417 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15418 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15419 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15420 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15421 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15422 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15424 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15425 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15426 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15427 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15429 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
15430 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
15431 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
15432 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
15433 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
15434 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
15435 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
15436 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
15437 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
15438 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
15439 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
15440 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
15442 /* The implementation of the FIX instruction is broken on some
15443 assemblers, in that it accepts a precision specifier as well as a
15444 rounding specifier, despite the fact that this is meaningless.
15445 To be more compatible, we accept it as well, though of course it
15446 does not set any bits. */
15447 cCE(fix
, e100110
, 2, (RR
, RF
), rd_rm
),
15448 cCL(fixp
, e100130
, 2, (RR
, RF
), rd_rm
),
15449 cCL(fixm
, e100150
, 2, (RR
, RF
), rd_rm
),
15450 cCL(fixz
, e100170
, 2, (RR
, RF
), rd_rm
),
15451 cCL(fixsp
, e100130
, 2, (RR
, RF
), rd_rm
),
15452 cCL(fixsm
, e100150
, 2, (RR
, RF
), rd_rm
),
15453 cCL(fixsz
, e100170
, 2, (RR
, RF
), rd_rm
),
15454 cCL(fixdp
, e100130
, 2, (RR
, RF
), rd_rm
),
15455 cCL(fixdm
, e100150
, 2, (RR
, RF
), rd_rm
),
15456 cCL(fixdz
, e100170
, 2, (RR
, RF
), rd_rm
),
15457 cCL(fixep
, e100130
, 2, (RR
, RF
), rd_rm
),
15458 cCL(fixem
, e100150
, 2, (RR
, RF
), rd_rm
),
15459 cCL(fixez
, e100170
, 2, (RR
, RF
), rd_rm
),
15461 /* Instructions that were new with the real FPA, call them V2. */
15463 #define ARM_VARIANT &fpu_fpa_ext_v2
15464 cCE(lfm
, c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15465 cCL(lfmfd
, c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15466 cCL(lfmea
, d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15467 cCE(sfm
, c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15468 cCL(sfmfd
, d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15469 cCL(sfmea
, c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
15472 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15473 /* Moves and type conversions. */
15474 cCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15475 cCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
15476 cCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
15477 cCE(fmstat
, ef1fa10
, 0, (), noargs
),
15478 cCE(fsitos
, eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15479 cCE(fuitos
, eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15480 cCE(ftosis
, ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15481 cCE(ftosizs
, ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15482 cCE(ftouis
, ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15483 cCE(ftouizs
, ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15484 cCE(fmrx
, ef00a10
, 2, (RR
, RVC
), rd_rn
),
15485 cCE(fmxr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
15487 /* Memory operations. */
15488 cCE(flds
, d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
15489 cCE(fsts
, d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
15490 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
15491 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
15492 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
15493 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
15494 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
15495 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
15496 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
15497 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
15498 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
15499 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
15500 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
15501 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
15502 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
15503 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
15504 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
15505 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
15507 /* Monadic operations. */
15508 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15509 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15510 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15512 /* Dyadic operations. */
15513 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15514 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15515 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15516 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15517 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15518 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15519 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15520 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15521 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15524 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15525 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
15526 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15527 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
15530 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15531 /* Moves and type conversions. */
15532 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15533 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15534 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15535 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15536 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15537 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15538 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15539 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15540 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15541 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15542 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15543 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15544 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15546 /* Memory operations. */
15547 cCE(fldd
, d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
15548 cCE(fstd
, d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
15549 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15550 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15551 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15552 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15553 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15554 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15555 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15556 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15558 /* Monadic operations. */
15559 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15560 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15561 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15563 /* Dyadic operations. */
15564 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15565 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15566 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15567 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15568 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15569 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15570 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15571 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15572 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15575 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15576 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
15577 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15578 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
15581 #define ARM_VARIANT &fpu_vfp_ext_v2
15582 cCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
15583 cCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
15584 cCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
15585 cCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
15587 /* Instructions which may belong to either the Neon or VFP instruction sets.
15588 Individual encoder functions perform additional architecture checks. */
15590 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15591 #undef THUMB_VARIANT
15592 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15593 /* These mnemonics are unique to VFP. */
15594 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
15595 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
15596 nCE(vnmul
, vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15597 nCE(vnmla
, vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15598 nCE(vnmls
, vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15599 nCE(vcmp
, vcmp
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15600 nCE(vcmpe
, vcmpe
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15601 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
15602 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
15603 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
15605 /* Mnemonics shared by Neon and VFP. */
15606 nCEF(vmul
, vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
15607 nCEF(vmla
, vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15608 nCEF(vmls
, vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15610 nCEF(vadd
, vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15611 nCEF(vsub
, vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15613 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15614 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15616 NCE(vldm
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15617 NCE(vldmia
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15618 NCE(vldmdb
, d100b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15619 NCE(vstm
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15620 NCE(vstmia
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15621 NCE(vstmdb
, d000b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15622 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
15623 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
15625 nCEF(vcvt
, vcvt
, 3, (RNSDQ
, RNSDQ
, oI32b
), neon_cvt
),
15627 /* NOTE: All VMOV encoding is special-cased! */
15628 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
15629 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
15631 #undef THUMB_VARIANT
15632 #define THUMB_VARIANT &fpu_neon_ext_v1
15634 #define ARM_VARIANT &fpu_neon_ext_v1
15635 /* Data processing with three registers of the same length. */
15636 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15637 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
15638 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
15639 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15640 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15641 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15642 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15643 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15644 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15645 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15646 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15647 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15648 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15649 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15650 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15651 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15652 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15653 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15654 /* If not immediate, fall back to neon_dyadic_i64_su.
15655 shl_imm should accept I8 I16 I32 I64,
15656 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15657 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
15658 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
15659 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
15660 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
15661 /* Logic ops, types optional & ignored. */
15662 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
15663 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
15664 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
15665 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
15666 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
15667 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
15668 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
15669 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
15670 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
15671 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
15672 /* Bitfield ops, untyped. */
15673 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15674 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15675 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15676 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15677 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15678 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15679 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15680 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15681 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15682 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15683 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15684 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15685 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15686 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15687 back to neon_dyadic_if_su. */
15688 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15689 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15690 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15691 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15692 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15693 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15694 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15695 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15696 /* Comparison. Type I8 I16 I32 F32. */
15697 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
15698 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
15699 /* As above, D registers only. */
15700 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15701 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15702 /* Int and float variants, signedness unimportant. */
15703 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15704 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15705 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
15706 /* Add/sub take types I8 I16 I32 I64 F32. */
15707 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15708 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15709 /* vtst takes sizes 8, 16, 32. */
15710 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
15711 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
15712 /* VMUL takes I8 I16 I32 F32 P8. */
15713 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
15714 /* VQD{R}MULH takes S16 S32. */
15715 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15716 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15717 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15718 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15719 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15720 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15721 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15722 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15723 NUF(vaclt
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15724 NUF(vacltq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15725 NUF(vacle
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15726 NUF(vacleq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15727 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15728 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15729 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15730 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15732 /* Two address, int/float. Types S8 S16 S32 F32. */
15733 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15734 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15736 /* Data processing with two registers and a shift amount. */
15737 /* Right shifts, and variants with rounding.
15738 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15739 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15740 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15741 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15742 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15743 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15744 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15745 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15746 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15747 /* Shift and insert. Sizes accepted 8 16 32 64. */
15748 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
15749 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
15750 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
15751 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
15752 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15753 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
15754 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
15755 /* Right shift immediate, saturating & narrowing, with rounding variants.
15756 Types accepted S16 S32 S64 U16 U32 U64. */
15757 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15758 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15759 /* As above, unsigned. Types accepted S16 S32 S64. */
15760 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15761 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15762 /* Right shift narrowing. Types accepted I16 I32 I64. */
15763 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15764 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15765 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15766 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
15767 /* CVT with optional immediate for fixed-point variant. */
15768 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
15770 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
15771 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
15773 /* Data processing, three registers of different lengths. */
15774 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15775 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
15776 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15777 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15778 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15779 /* If not scalar, fall back to neon_dyadic_long.
15780 Vector types as above, scalar types S16 S32 U16 U32. */
15781 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15782 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15783 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15784 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15785 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15786 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15787 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15788 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15789 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15790 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15791 /* Saturating doubling multiplies. Types S16 S32. */
15792 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15793 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15794 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15795 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15796 S16 S32 U16 U32. */
15797 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
15799 /* Extract. Size 8. */
15800 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I7
), neon_ext
),
15801 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I7
), neon_ext
),
15803 /* Two registers, miscellaneous. */
15804 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15805 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
15806 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
15807 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
15808 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
15809 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
15810 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
15811 /* Vector replicate. Sizes 8 16 32. */
15812 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
15813 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
15814 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15815 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
15816 /* VMOVN. Types I16 I32 I64. */
15817 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
15818 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15819 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
15820 /* VQMOVUN. Types S16 S32 S64. */
15821 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
15822 /* VZIP / VUZP. Sizes 8 16 32. */
15823 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15824 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15825 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15826 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15827 /* VQABS / VQNEG. Types S8 S16 S32. */
15828 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15829 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15830 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15831 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15832 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15833 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15834 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
15835 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15836 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
15837 /* Reciprocal estimates. Types U32 F32. */
15838 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15839 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
15840 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15841 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
15842 /* VCLS. Types S8 S16 S32. */
15843 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
15844 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
15845 /* VCLZ. Types I8 I16 I32. */
15846 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
15847 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
15848 /* VCNT. Size 8. */
15849 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
15850 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
15851 /* Two address, untyped. */
15852 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
15853 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
15854 /* VTRN. Sizes 8 16 32. */
15855 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
15856 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
15858 /* Table lookup. Size 8. */
15859 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15860 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15862 #undef THUMB_VARIANT
15863 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15865 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15866 /* Neon element/structure load/store. */
15867 nUF(vld1
, vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15868 nUF(vst1
, vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15869 nUF(vld2
, vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15870 nUF(vst2
, vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15871 nUF(vld3
, vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15872 nUF(vst3
, vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15873 nUF(vld4
, vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15874 nUF(vst4
, vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15876 #undef THUMB_VARIANT
15877 #define THUMB_VARIANT &fpu_vfp_ext_v3
15879 #define ARM_VARIANT &fpu_vfp_ext_v3
15880 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
15881 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
15882 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15883 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15884 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15885 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15886 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15887 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15888 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15889 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15890 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15891 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15892 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15893 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15894 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15895 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15896 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15897 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15899 #undef THUMB_VARIANT
15901 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15902 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15903 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15904 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15905 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15906 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15907 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15908 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
15909 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
15912 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15913 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
15914 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
15915 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
15916 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
15917 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
15918 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
15919 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
15920 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
15921 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
15922 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15923 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15924 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15925 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15926 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15927 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15928 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15929 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15930 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15931 cCE(tmcr
, e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
15932 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
15933 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15934 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15935 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15936 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15937 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15938 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15939 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
15940 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
15941 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
15942 cCE(tmrc
, e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
15943 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
15944 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
15945 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
15946 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
15947 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15948 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15949 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15950 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15951 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15952 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15953 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15954 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15955 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15956 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15957 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15958 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15959 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
15960 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15961 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15962 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15963 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15964 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15965 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15966 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15967 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15968 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15969 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15970 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15971 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15972 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15973 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15974 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15975 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15976 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15977 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15978 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15979 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15980 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15981 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
15982 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
15983 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15984 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15985 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15986 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15987 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15988 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15989 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15990 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15991 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15992 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15993 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15994 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15995 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15996 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15997 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15998 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15999 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16000 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16001 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
16002 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16003 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16004 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16005 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16006 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16007 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16008 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16009 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16010 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16011 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16012 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16013 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16014 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16015 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16016 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16017 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16018 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16019 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16020 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16021 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16022 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16023 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
16024 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16025 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16026 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16027 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16028 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16029 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16030 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16031 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16032 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16033 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16034 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16035 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16036 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16037 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16038 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16039 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16040 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16041 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16042 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16043 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16044 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
16045 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
16046 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16047 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16048 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16049 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16050 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16051 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16052 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16053 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16054 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16055 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16056 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16057 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16058 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16059 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16060 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16061 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16062 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16063 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16064 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16065 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16066 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16067 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16068 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16069 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16070 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16071 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16072 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16073 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16074 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
16077 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16078 cCE(torvscb
, e13f190
, 1, (RR
), iwmmxt_tandorc
),
16079 cCE(torvsch
, e53f190
, 1, (RR
), iwmmxt_tandorc
),
16080 cCE(torvscw
, e93f190
, 1, (RR
), iwmmxt_tandorc
),
16081 cCE(wabsb
, e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16082 cCE(wabsh
, e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16083 cCE(wabsw
, ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16084 cCE(wabsdiffb
, e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16085 cCE(wabsdiffh
, e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16086 cCE(wabsdiffw
, e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16087 cCE(waddbhusl
, e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16088 cCE(waddbhusm
, e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16089 cCE(waddhc
, e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16090 cCE(waddwc
, ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16091 cCE(waddsubhx
, ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16092 cCE(wavg4
, e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16093 cCE(wavg4r
, e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16094 cCE(wmaddsn
, ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16095 cCE(wmaddsx
, eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16096 cCE(wmaddun
, ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16097 cCE(wmaddux
, e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16098 cCE(wmerge
, e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
16099 cCE(wmiabb
, e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16100 cCE(wmiabt
, e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16101 cCE(wmiatb
, e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16102 cCE(wmiatt
, e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16103 cCE(wmiabbn
, e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16104 cCE(wmiabtn
, e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16105 cCE(wmiatbn
, e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16106 cCE(wmiattn
, e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16107 cCE(wmiawbb
, e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16108 cCE(wmiawbt
, e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16109 cCE(wmiawtb
, ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16110 cCE(wmiawtt
, eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16111 cCE(wmiawbbn
, ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16112 cCE(wmiawbtn
, ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16113 cCE(wmiawtbn
, ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16114 cCE(wmiawttn
, ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16115 cCE(wmulsmr
, ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16116 cCE(wmulumr
, ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16117 cCE(wmulwumr
, ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16118 cCE(wmulwsmr
, ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16119 cCE(wmulwum
, ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16120 cCE(wmulwsm
, ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16121 cCE(wmulwl
, eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16122 cCE(wqmiabb
, e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16123 cCE(wqmiabt
, e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16124 cCE(wqmiatb
, ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16125 cCE(wqmiatt
, eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16126 cCE(wqmiabbn
, ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16127 cCE(wqmiabtn
, ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16128 cCE(wqmiatbn
, ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16129 cCE(wqmiattn
, ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16130 cCE(wqmulm
, e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16131 cCE(wqmulmr
, e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16132 cCE(wqmulwm
, ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16133 cCE(wqmulwmr
, ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16134 cCE(wsubaddhx
, ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16137 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16138 cCE(cfldrs
, c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16139 cCE(cfldrd
, c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16140 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16141 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16142 cCE(cfstrs
, c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16143 cCE(cfstrd
, c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16144 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16145 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16146 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
16147 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
16148 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
16149 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
16150 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
16151 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
16152 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
16153 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
16154 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
16155 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
16156 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
16157 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
16158 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
16159 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
16160 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
16161 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
16162 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
16163 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
16164 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
16165 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
16166 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
16167 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
16168 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
16169 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
16170 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
16171 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
16172 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
16173 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
16174 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
16175 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
16176 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
16177 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
16178 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
16179 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
16180 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
16181 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
16182 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
16183 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
16184 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
16185 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
16186 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
16187 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
16188 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
16189 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
16190 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
16191 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
16192 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16193 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16194 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16195 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16196 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16197 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16198 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
16199 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
16200 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
16201 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
16202 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16203 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16204 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16205 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16206 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16207 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16208 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16209 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16210 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16211 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16212 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
16213 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
16216 #undef THUMB_VARIANT
16243 /* MD interface: bits in the object file. */
16245 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16246 for use in the a.out file, and stores them in the array pointed to by buf.
16247 This knows about the endian-ness of the target machine and does
16248 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
16249 2 (short) and 4 (long) Floating numbers are put out as a series of
16250 LITTLENUMS (shorts, here at least). */
16253 md_number_to_chars (char * buf
, valueT val
, int n
)
16255 if (target_big_endian
)
16256 number_to_chars_bigendian (buf
, val
, n
);
16258 number_to_chars_littleendian (buf
, val
, n
);
16262 md_chars_to_number (char * buf
, int n
)
16265 unsigned char * where
= (unsigned char *) buf
;
16267 if (target_big_endian
)
16272 result
|= (*where
++ & 255);
16280 result
|= (where
[n
] & 255);
16287 /* MD interface: Sections. */
16289 /* Estimate the size of a frag before relaxing. Assume everything fits in
16293 md_estimate_size_before_relax (fragS
* fragp
,
16294 segT segtype ATTRIBUTE_UNUSED
)
16300 /* Convert a machine dependent frag. */
16303 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
16305 unsigned long insn
;
16306 unsigned long old_op
;
16314 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16316 old_op
= bfd_get_16(abfd
, buf
);
16317 if (fragp
->fr_symbol
) {
16318 exp
.X_op
= O_symbol
;
16319 exp
.X_add_symbol
= fragp
->fr_symbol
;
16321 exp
.X_op
= O_constant
;
16323 exp
.X_add_number
= fragp
->fr_offset
;
16324 opcode
= fragp
->fr_subtype
;
16327 case T_MNEM_ldr_pc
:
16328 case T_MNEM_ldr_pc2
:
16329 case T_MNEM_ldr_sp
:
16330 case T_MNEM_str_sp
:
16337 if (fragp
->fr_var
== 4)
16339 insn
= THUMB_OP32(opcode
);
16340 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
16342 insn
|= (old_op
& 0x700) << 4;
16346 insn
|= (old_op
& 7) << 12;
16347 insn
|= (old_op
& 0x38) << 13;
16349 insn
|= 0x00000c00;
16350 put_thumb32_insn (buf
, insn
);
16351 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
16355 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
16357 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
16360 if (fragp
->fr_var
== 4)
16362 insn
= THUMB_OP32 (opcode
);
16363 insn
|= (old_op
& 0xf0) << 4;
16364 put_thumb32_insn (buf
, insn
);
16365 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
16369 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16370 exp
.X_add_number
-= 4;
16378 if (fragp
->fr_var
== 4)
16380 int r0off
= (opcode
== T_MNEM_mov
16381 || opcode
== T_MNEM_movs
) ? 0 : 8;
16382 insn
= THUMB_OP32 (opcode
);
16383 insn
= (insn
& 0xe1ffffff) | 0x10000000;
16384 insn
|= (old_op
& 0x700) << r0off
;
16385 put_thumb32_insn (buf
, insn
);
16386 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16390 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
16395 if (fragp
->fr_var
== 4)
16397 insn
= THUMB_OP32(opcode
);
16398 put_thumb32_insn (buf
, insn
);
16399 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
16402 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
16406 if (fragp
->fr_var
== 4)
16408 insn
= THUMB_OP32(opcode
);
16409 insn
|= (old_op
& 0xf00) << 14;
16410 put_thumb32_insn (buf
, insn
);
16411 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
16414 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
16417 case T_MNEM_add_sp
:
16418 case T_MNEM_add_pc
:
16419 case T_MNEM_inc_sp
:
16420 case T_MNEM_dec_sp
:
16421 if (fragp
->fr_var
== 4)
16423 /* ??? Choose between add and addw. */
16424 insn
= THUMB_OP32 (opcode
);
16425 insn
|= (old_op
& 0xf0) << 4;
16426 put_thumb32_insn (buf
, insn
);
16427 if (opcode
== T_MNEM_add_pc
)
16428 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
16430 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16433 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16441 if (fragp
->fr_var
== 4)
16443 insn
= THUMB_OP32 (opcode
);
16444 insn
|= (old_op
& 0xf0) << 4;
16445 insn
|= (old_op
& 0xf) << 16;
16446 put_thumb32_insn (buf
, insn
);
16447 if (insn
& (1 << 20))
16448 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16450 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16453 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16459 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
16461 fixp
->fx_file
= fragp
->fr_file
;
16462 fixp
->fx_line
= fragp
->fr_line
;
16463 fragp
->fr_fix
+= fragp
->fr_var
;
16466 /* Return the size of a relaxable immediate operand instruction.
16467 SHIFT and SIZE specify the form of the allowable immediate. */
16469 relax_immediate (fragS
*fragp
, int size
, int shift
)
16475 /* ??? Should be able to do better than this. */
16476 if (fragp
->fr_symbol
)
16479 low
= (1 << shift
) - 1;
16480 mask
= (1 << (shift
+ size
)) - (1 << shift
);
16481 offset
= fragp
->fr_offset
;
16482 /* Force misaligned offsets to 32-bit variant. */
16485 if (offset
& ~mask
)
16490 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16493 relax_adr (fragS
*fragp
, asection
*sec
)
16498 /* Assume worst case for symbols not known to be in the same section. */
16499 if (!S_IS_DEFINED(fragp
->fr_symbol
)
16500 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
16503 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
16504 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
16505 addr
= (addr
+ 4) & ~3;
16506 /* Fix the insn as the 4-byte version if the target address is not
16507 sufficiently aligned. This is prevents an infinite loop when two
16508 instructions have contradictory range/alignment requirements. */
16512 if (val
< 0 || val
> 1020)
16517 /* Return the size of a relaxable add/sub immediate instruction. */
16519 relax_addsub (fragS
*fragp
, asection
*sec
)
16524 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16525 op
= bfd_get_16(sec
->owner
, buf
);
16526 if ((op
& 0xf) == ((op
>> 4) & 0xf))
16527 return relax_immediate (fragp
, 8, 0);
16529 return relax_immediate (fragp
, 3, 0);
16533 /* Return the size of a relaxable branch instruction. BITS is the
16534 size of the offset field in the narrow instruction. */
16537 relax_branch (fragS
*fragp
, asection
*sec
, int bits
)
16543 /* Assume worst case for symbols not known to be in the same section. */
16544 if (!S_IS_DEFINED(fragp
->fr_symbol
)
16545 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
16548 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
16549 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
16552 /* Offset is a signed value *2 */
16554 if (val
>= limit
|| val
< -limit
)
16560 /* Relax a machine dependent frag. This returns the amount by which
16561 the current size of the frag should change. */
16564 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch ATTRIBUTE_UNUSED
)
16569 oldsize
= fragp
->fr_var
;
16570 switch (fragp
->fr_subtype
)
16572 case T_MNEM_ldr_pc2
:
16573 newsize
= relax_adr(fragp
, sec
);
16575 case T_MNEM_ldr_pc
:
16576 case T_MNEM_ldr_sp
:
16577 case T_MNEM_str_sp
:
16578 newsize
= relax_immediate(fragp
, 8, 2);
16582 newsize
= relax_immediate(fragp
, 5, 2);
16586 newsize
= relax_immediate(fragp
, 5, 1);
16590 newsize
= relax_immediate(fragp
, 5, 0);
16593 newsize
= relax_adr(fragp
, sec
);
16599 newsize
= relax_immediate(fragp
, 8, 0);
16602 newsize
= relax_branch(fragp
, sec
, 11);
16605 newsize
= relax_branch(fragp
, sec
, 8);
16607 case T_MNEM_add_sp
:
16608 case T_MNEM_add_pc
:
16609 newsize
= relax_immediate (fragp
, 8, 2);
16611 case T_MNEM_inc_sp
:
16612 case T_MNEM_dec_sp
:
16613 newsize
= relax_immediate (fragp
, 7, 2);
16619 newsize
= relax_addsub (fragp
, sec
);
16626 fragp
->fr_var
= -newsize
;
16627 md_convert_frag (sec
->owner
, sec
, fragp
);
16629 return -(newsize
+ oldsize
);
16631 fragp
->fr_var
= newsize
;
16632 return newsize
- oldsize
;
16635 /* Round up a section size to the appropriate boundary. */
16638 md_section_align (segT segment ATTRIBUTE_UNUSED
,
16641 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16642 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
16644 /* For a.out, force the section size to be aligned. If we don't do
16645 this, BFD will align it for us, but it will not write out the
16646 final bytes of the section. This may be a bug in BFD, but it is
16647 easier to fix it here since that is how the other a.out targets
16651 align
= bfd_get_section_alignment (stdoutput
, segment
);
16652 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
16659 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16660 of an rs_align_code fragment. */
16663 arm_handle_align (fragS
* fragP
)
16665 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16666 static char const thumb_noop
[2] = { 0xc0, 0x46 };
16667 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16668 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
16670 int bytes
, fix
, noop_size
;
16674 if (fragP
->fr_type
!= rs_align_code
)
16677 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
16678 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
16681 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16682 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
16684 if (fragP
->tc_frag_data
)
16686 if (target_big_endian
)
16687 noop
= thumb_bigend_noop
;
16690 noop_size
= sizeof (thumb_noop
);
16694 if (target_big_endian
)
16695 noop
= arm_bigend_noop
;
16698 noop_size
= sizeof (arm_noop
);
16701 if (bytes
& (noop_size
- 1))
16703 fix
= bytes
& (noop_size
- 1);
16704 memset (p
, 0, fix
);
16709 while (bytes
>= noop_size
)
16711 memcpy (p
, noop
, noop_size
);
16713 bytes
-= noop_size
;
16717 fragP
->fr_fix
+= fix
;
16718 fragP
->fr_var
= noop_size
;
16721 /* Called from md_do_align. Used to create an alignment
16722 frag in a code section. */
16725 arm_frag_align_code (int n
, int max
)
16729 /* We assume that there will never be a requirement
16730 to support alignments greater than 32 bytes. */
16731 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16732 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16734 p
= frag_var (rs_align_code
,
16735 MAX_MEM_FOR_RS_ALIGN_CODE
,
16737 (relax_substateT
) max
,
16744 /* Perform target specific initialisation of a frag. */
16747 arm_init_frag (fragS
* fragP
)
16749 /* Record whether this frag is in an ARM or a THUMB area. */
16750 fragP
->tc_frag_data
= thumb_mode
;
16754 /* When we change sections we need to issue a new mapping symbol. */
16757 arm_elf_change_section (void)
16760 segment_info_type
*seginfo
;
16762 /* Link an unlinked unwind index table section to the .text section. */
16763 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
16764 && elf_linked_to_section (now_seg
) == NULL
)
16765 elf_linked_to_section (now_seg
) = text_section
;
16767 if (!SEG_NORMAL (now_seg
))
16770 flags
= bfd_get_section_flags (stdoutput
, now_seg
);
16772 /* We can ignore sections that only contain debug info. */
16773 if ((flags
& SEC_ALLOC
) == 0)
16776 seginfo
= seg_info (now_seg
);
16777 mapstate
= seginfo
->tc_segment_info_data
.mapstate
;
16778 marked_pr_dependency
= seginfo
->tc_segment_info_data
.marked_pr_dependency
;
16782 arm_elf_section_type (const char * str
, size_t len
)
16784 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
16785 return SHT_ARM_EXIDX
;
16790 /* Code to deal with unwinding tables. */
16792 static void add_unwind_adjustsp (offsetT
);
16794 /* Cenerate and deferred unwind frame offset. */
16797 flush_pending_unwind (void)
16801 offset
= unwind
.pending_offset
;
16802 unwind
.pending_offset
= 0;
16804 add_unwind_adjustsp (offset
);
16807 /* Add an opcode to this list for this function. Two-byte opcodes should
16808 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16812 add_unwind_opcode (valueT op
, int length
)
16814 /* Add any deferred stack adjustment. */
16815 if (unwind
.pending_offset
)
16816 flush_pending_unwind ();
16818 unwind
.sp_restored
= 0;
16820 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
16822 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
16823 if (unwind
.opcodes
)
16824 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
16825 unwind
.opcode_alloc
);
16827 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
16832 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
16834 unwind
.opcode_count
++;
16838 /* Add unwind opcodes to adjust the stack pointer. */
16841 add_unwind_adjustsp (offsetT offset
)
16845 if (offset
> 0x200)
16847 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16852 /* Long form: 0xb2, uleb128. */
16853 /* This might not fit in a word so add the individual bytes,
16854 remembering the list is built in reverse order. */
16855 o
= (valueT
) ((offset
- 0x204) >> 2);
16857 add_unwind_opcode (0, 1);
16859 /* Calculate the uleb128 encoding of the offset. */
16863 bytes
[n
] = o
& 0x7f;
16869 /* Add the insn. */
16871 add_unwind_opcode (bytes
[n
- 1], 1);
16872 add_unwind_opcode (0xb2, 1);
16874 else if (offset
> 0x100)
16876 /* Two short opcodes. */
16877 add_unwind_opcode (0x3f, 1);
16878 op
= (offset
- 0x104) >> 2;
16879 add_unwind_opcode (op
, 1);
16881 else if (offset
> 0)
16883 /* Short opcode. */
16884 op
= (offset
- 4) >> 2;
16885 add_unwind_opcode (op
, 1);
16887 else if (offset
< 0)
16890 while (offset
> 0x100)
16892 add_unwind_opcode (0x7f, 1);
16895 op
= ((offset
- 4) >> 2) | 0x40;
16896 add_unwind_opcode (op
, 1);
16900 /* Finish the list of unwind opcodes for this function. */
16902 finish_unwind_opcodes (void)
16906 if (unwind
.fp_used
)
16908 /* Adjust sp as necessary. */
16909 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
16910 flush_pending_unwind ();
16912 /* After restoring sp from the frame pointer. */
16913 op
= 0x90 | unwind
.fp_reg
;
16914 add_unwind_opcode (op
, 1);
16917 flush_pending_unwind ();
16921 /* Start an exception table entry. If idx is nonzero this is an index table
16925 start_unwind_section (const segT text_seg
, int idx
)
16927 const char * text_name
;
16928 const char * prefix
;
16929 const char * prefix_once
;
16930 const char * group_name
;
16934 size_t sec_name_len
;
16941 prefix
= ELF_STRING_ARM_unwind
;
16942 prefix_once
= ELF_STRING_ARM_unwind_once
;
16943 type
= SHT_ARM_EXIDX
;
16947 prefix
= ELF_STRING_ARM_unwind_info
;
16948 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
16949 type
= SHT_PROGBITS
;
16952 text_name
= segment_name (text_seg
);
16953 if (streq (text_name
, ".text"))
16956 if (strncmp (text_name
, ".gnu.linkonce.t.",
16957 strlen (".gnu.linkonce.t.")) == 0)
16959 prefix
= prefix_once
;
16960 text_name
+= strlen (".gnu.linkonce.t.");
16963 prefix_len
= strlen (prefix
);
16964 text_len
= strlen (text_name
);
16965 sec_name_len
= prefix_len
+ text_len
;
16966 sec_name
= xmalloc (sec_name_len
+ 1);
16967 memcpy (sec_name
, prefix
, prefix_len
);
16968 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
16969 sec_name
[prefix_len
+ text_len
] = '\0';
16975 /* Handle COMDAT group. */
16976 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
16978 group_name
= elf_group_name (text_seg
);
16979 if (group_name
== NULL
)
16981 as_bad ("Group section `%s' has no group signature",
16982 segment_name (text_seg
));
16983 ignore_rest_of_line ();
16986 flags
|= SHF_GROUP
;
16990 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
16992 /* Set the setion link for index tables. */
16994 elf_linked_to_section (now_seg
) = text_seg
;
16998 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
16999 personality routine data. Returns zero, or the index table value for
17000 and inline entry. */
17003 create_unwind_entry (int have_data
)
17008 /* The current word of data. */
17010 /* The number of bytes left in this word. */
17013 finish_unwind_opcodes ();
17015 /* Remember the current text section. */
17016 unwind
.saved_seg
= now_seg
;
17017 unwind
.saved_subseg
= now_subseg
;
17019 start_unwind_section (now_seg
, 0);
17021 if (unwind
.personality_routine
== NULL
)
17023 if (unwind
.personality_index
== -2)
17026 as_bad (_("handerdata in cantunwind frame"));
17027 return 1; /* EXIDX_CANTUNWIND. */
17030 /* Use a default personality routine if none is specified. */
17031 if (unwind
.personality_index
== -1)
17033 if (unwind
.opcode_count
> 3)
17034 unwind
.personality_index
= 1;
17036 unwind
.personality_index
= 0;
17039 /* Space for the personality routine entry. */
17040 if (unwind
.personality_index
== 0)
17042 if (unwind
.opcode_count
> 3)
17043 as_bad (_("too many unwind opcodes for personality routine 0"));
17047 /* All the data is inline in the index table. */
17050 while (unwind
.opcode_count
> 0)
17052 unwind
.opcode_count
--;
17053 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
17057 /* Pad with "finish" opcodes. */
17059 data
= (data
<< 8) | 0xb0;
17066 /* We get two opcodes "free" in the first word. */
17067 size
= unwind
.opcode_count
- 2;
17070 /* An extra byte is required for the opcode count. */
17071 size
= unwind
.opcode_count
+ 1;
17073 size
= (size
+ 3) >> 2;
17075 as_bad (_("too many unwind opcodes"));
17077 frag_align (2, 0, 0);
17078 record_alignment (now_seg
, 2);
17079 unwind
.table_entry
= expr_build_dot ();
17081 /* Allocate the table entry. */
17082 ptr
= frag_more ((size
<< 2) + 4);
17083 where
= frag_now_fix () - ((size
<< 2) + 4);
17085 switch (unwind
.personality_index
)
17088 /* ??? Should this be a PLT generating relocation? */
17089 /* Custom personality routine. */
17090 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
17091 BFD_RELOC_ARM_PREL31
);
17096 /* Set the first byte to the number of additional words. */
17101 /* ABI defined personality routines. */
17103 /* Three opcodes bytes are packed into the first word. */
17110 /* The size and first two opcode bytes go in the first word. */
17111 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
17116 /* Should never happen. */
17120 /* Pack the opcodes into words (MSB first), reversing the list at the same
17122 while (unwind
.opcode_count
> 0)
17126 md_number_to_chars (ptr
, data
, 4);
17131 unwind
.opcode_count
--;
17133 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
17136 /* Finish off the last word. */
17139 /* Pad with "finish" opcodes. */
17141 data
= (data
<< 8) | 0xb0;
17143 md_number_to_chars (ptr
, data
, 4);
17148 /* Add an empty descriptor if there is no user-specified data. */
17149 ptr
= frag_more (4);
17150 md_number_to_chars (ptr
, 0, 4);
17157 /* Initialize the DWARF-2 unwind information for this procedure. */
17160 tc_arm_frame_initial_instructions (void)
17162 cfi_add_CFA_def_cfa (REG_SP
, 0);
17164 #endif /* OBJ_ELF */
17166 /* Convert REGNAME to a DWARF-2 register number. */
17169 tc_arm_regname_to_dw2regnum (char *regname
)
17171 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
17181 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
17185 expr
.X_op
= O_secrel
;
17186 expr
.X_add_symbol
= symbol
;
17187 expr
.X_add_number
= 0;
17188 emit_expr (&expr
, size
);
17192 /* MD interface: Symbol and relocation handling. */
17194 /* Return the address within the segment that a PC-relative fixup is
17195 relative to. For ARM, PC-relative fixups applied to instructions
17196 are generally relative to the location of the fixup plus 8 bytes.
17197 Thumb branches are offset by 4, and Thumb loads relative to PC
17198 require special handling. */
17201 md_pcrel_from_section (fixS
* fixP
, segT seg
)
17203 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
17205 /* If this is pc-relative and we are going to emit a relocation
17206 then we just want to put out any pipeline compensation that the linker
17207 will need. Otherwise we want to use the calculated base.
17208 For WinCE we skip the bias for externals as well, since this
17209 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17211 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
17212 || (arm_force_relocation (fixP
)
17214 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
17219 switch (fixP
->fx_r_type
)
17221 /* PC relative addressing on the Thumb is slightly odd as the
17222 bottom two bits of the PC are forced to zero for the
17223 calculation. This happens *after* application of the
17224 pipeline offset. However, Thumb adrl already adjusts for
17225 this, so we need not do it again. */
17226 case BFD_RELOC_ARM_THUMB_ADD
:
17229 case BFD_RELOC_ARM_THUMB_OFFSET
:
17230 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
17231 case BFD_RELOC_ARM_T32_ADD_PC12
:
17232 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
17233 return (base
+ 4) & ~3;
17235 /* Thumb branches are simply offset by +4. */
17236 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
17237 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
17238 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
17239 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17240 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17241 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17242 case BFD_RELOC_THUMB_PCREL_BLX
:
17245 /* ARM mode branches are offset by +8. However, the Windows CE
17246 loader expects the relocation not to take this into account. */
17247 case BFD_RELOC_ARM_PCREL_BRANCH
:
17248 case BFD_RELOC_ARM_PCREL_CALL
:
17249 case BFD_RELOC_ARM_PCREL_JUMP
:
17250 case BFD_RELOC_ARM_PCREL_BLX
:
17251 case BFD_RELOC_ARM_PLT32
:
17253 /* When handling fixups immediately, because we have already
17254 discovered the value of a symbol, or the address of the frag involved
17255 we must account for the offset by +8, as the OS loader will never see the reloc.
17256 see fixup_segment() in write.c
17257 The S_IS_EXTERNAL test handles the case of global symbols.
17258 Those need the calculated base, not just the pipe compensation the linker will need. */
17260 && fixP
->fx_addsy
!= NULL
17261 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
17262 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
17269 /* ARM mode loads relative to PC are also offset by +8. Unlike
17270 branches, the Windows CE loader *does* expect the relocation
17271 to take this into account. */
17272 case BFD_RELOC_ARM_OFFSET_IMM
:
17273 case BFD_RELOC_ARM_OFFSET_IMM8
:
17274 case BFD_RELOC_ARM_HWLITERAL
:
17275 case BFD_RELOC_ARM_LITERAL
:
17276 case BFD_RELOC_ARM_CP_OFF_IMM
:
17280 /* Other PC-relative relocations are un-offset. */
17286 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17287 Otherwise we have no need to default values of symbols. */
17290 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
17293 if (name
[0] == '_' && name
[1] == 'G'
17294 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
17298 if (symbol_find (name
))
17299 as_bad ("GOT already in the symbol table");
17301 GOT_symbol
= symbol_new (name
, undefined_section
,
17302 (valueT
) 0, & zero_address_frag
);
17312 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17313 computed as two separate immediate values, added together. We
17314 already know that this value cannot be computed by just one ARM
17317 static unsigned int
17318 validate_immediate_twopart (unsigned int val
,
17319 unsigned int * highpart
)
17324 for (i
= 0; i
< 32; i
+= 2)
17325 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
17331 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
17333 else if (a
& 0xff0000)
17335 if (a
& 0xff000000)
17337 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
17341 assert (a
& 0xff000000);
17342 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
17345 return (a
& 0xff) | (i
<< 7);
17352 validate_offset_imm (unsigned int val
, int hwse
)
17354 if ((hwse
&& val
> 255) || val
> 4095)
17359 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17360 negative immediate constant by altering the instruction. A bit of
17365 by inverting the second operand, and
17368 by negating the second operand. */
17371 negate_data_op (unsigned long * instruction
,
17372 unsigned long value
)
17375 unsigned long negated
, inverted
;
17377 negated
= encode_arm_immediate (-value
);
17378 inverted
= encode_arm_immediate (~value
);
17380 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
17383 /* First negates. */
17384 case OPCODE_SUB
: /* ADD <-> SUB */
17385 new_inst
= OPCODE_ADD
;
17390 new_inst
= OPCODE_SUB
;
17394 case OPCODE_CMP
: /* CMP <-> CMN */
17395 new_inst
= OPCODE_CMN
;
17400 new_inst
= OPCODE_CMP
;
17404 /* Now Inverted ops. */
17405 case OPCODE_MOV
: /* MOV <-> MVN */
17406 new_inst
= OPCODE_MVN
;
17411 new_inst
= OPCODE_MOV
;
17415 case OPCODE_AND
: /* AND <-> BIC */
17416 new_inst
= OPCODE_BIC
;
17421 new_inst
= OPCODE_AND
;
17425 case OPCODE_ADC
: /* ADC <-> SBC */
17426 new_inst
= OPCODE_SBC
;
17431 new_inst
= OPCODE_ADC
;
17435 /* We cannot do anything. */
17440 if (value
== (unsigned) FAIL
)
17443 *instruction
&= OPCODE_MASK
;
17444 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
17448 /* Like negate_data_op, but for Thumb-2. */
17450 static unsigned int
17451 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
17455 unsigned int negated
, inverted
;
17457 negated
= encode_thumb32_immediate (-value
);
17458 inverted
= encode_thumb32_immediate (~value
);
17460 rd
= (*instruction
>> 8) & 0xf;
17461 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
17464 /* ADD <-> SUB. Includes CMP <-> CMN. */
17465 case T2_OPCODE_SUB
:
17466 new_inst
= T2_OPCODE_ADD
;
17470 case T2_OPCODE_ADD
:
17471 new_inst
= T2_OPCODE_SUB
;
17475 /* ORR <-> ORN. Includes MOV <-> MVN. */
17476 case T2_OPCODE_ORR
:
17477 new_inst
= T2_OPCODE_ORN
;
17481 case T2_OPCODE_ORN
:
17482 new_inst
= T2_OPCODE_ORR
;
17486 /* AND <-> BIC. TST has no inverted equivalent. */
17487 case T2_OPCODE_AND
:
17488 new_inst
= T2_OPCODE_BIC
;
17495 case T2_OPCODE_BIC
:
17496 new_inst
= T2_OPCODE_AND
;
17501 case T2_OPCODE_ADC
:
17502 new_inst
= T2_OPCODE_SBC
;
17506 case T2_OPCODE_SBC
:
17507 new_inst
= T2_OPCODE_ADC
;
17511 /* We cannot do anything. */
17516 if (value
== (unsigned int)FAIL
)
17519 *instruction
&= T2_OPCODE_MASK
;
17520 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
17524 /* Read a 32-bit thumb instruction from buf. */
17525 static unsigned long
17526 get_thumb32_insn (char * buf
)
17528 unsigned long insn
;
17529 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
17530 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17536 /* We usually want to set the low bit on the address of thumb function
17537 symbols. In particular .word foo - . should have the low bit set.
17538 Generic code tries to fold the difference of two symbols to
17539 a constant. Prevent this and force a relocation when the first symbols
17540 is a thumb function. */
17542 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
17544 if (op
== O_subtract
17545 && l
->X_op
== O_symbol
17546 && r
->X_op
== O_symbol
17547 && THUMB_IS_FUNC (l
->X_add_symbol
))
17549 l
->X_op
= O_subtract
;
17550 l
->X_op_symbol
= r
->X_add_symbol
;
17551 l
->X_add_number
-= r
->X_add_number
;
17554 /* Process as normal. */
17559 md_apply_fix (fixS
* fixP
,
17563 offsetT value
= * valP
;
17565 unsigned int newimm
;
17566 unsigned long temp
;
17568 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
17570 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
17572 /* Note whether this will delete the relocation. */
17574 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
17577 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17578 consistency with the behavior on 32-bit hosts. Remember value
17580 value
&= 0xffffffff;
17581 value
^= 0x80000000;
17582 value
-= 0x80000000;
17585 fixP
->fx_addnumber
= value
;
17587 /* Same treatment for fixP->fx_offset. */
17588 fixP
->fx_offset
&= 0xffffffff;
17589 fixP
->fx_offset
^= 0x80000000;
17590 fixP
->fx_offset
-= 0x80000000;
17592 switch (fixP
->fx_r_type
)
17594 case BFD_RELOC_NONE
:
17595 /* This will need to go in the object file. */
17599 case BFD_RELOC_ARM_IMMEDIATE
:
17600 /* We claim that this fixup has been processed here,
17601 even if in fact we generate an error because we do
17602 not have a reloc for it, so tc_gen_reloc will reject it. */
17606 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17609 _("undefined symbol %s used as an immediate value"),
17610 S_GET_NAME (fixP
->fx_addsy
));
17614 newimm
= encode_arm_immediate (value
);
17615 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17617 /* If the instruction will fail, see if we can fix things up by
17618 changing the opcode. */
17619 if (newimm
== (unsigned int) FAIL
17620 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
17622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17623 _("invalid constant (%lx) after fixup"),
17624 (unsigned long) value
);
17628 newimm
|= (temp
& 0xfffff000);
17629 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
17632 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
17634 unsigned int highpart
= 0;
17635 unsigned int newinsn
= 0xe1a00000; /* nop. */
17637 newimm
= encode_arm_immediate (value
);
17638 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17640 /* If the instruction will fail, see if we can fix things up by
17641 changing the opcode. */
17642 if (newimm
== (unsigned int) FAIL
17643 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
17645 /* No ? OK - try using two ADD instructions to generate
17647 newimm
= validate_immediate_twopart (value
, & highpart
);
17649 /* Yes - then make sure that the second instruction is
17651 if (newimm
!= (unsigned int) FAIL
)
17653 /* Still No ? Try using a negated value. */
17654 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
17655 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
17656 /* Otherwise - give up. */
17659 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17660 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17665 /* Replace the first operand in the 2nd instruction (which
17666 is the PC) with the destination register. We have
17667 already added in the PC in the first instruction and we
17668 do not want to do it again. */
17669 newinsn
&= ~ 0xf0000;
17670 newinsn
|= ((newinsn
& 0x0f000) << 4);
17673 newimm
|= (temp
& 0xfffff000);
17674 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
17676 highpart
|= (newinsn
& 0xfffff000);
17677 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
17681 case BFD_RELOC_ARM_OFFSET_IMM
:
17682 if (!fixP
->fx_done
&& seg
->use_rela_p
)
17685 case BFD_RELOC_ARM_LITERAL
:
17691 if (validate_offset_imm (value
, 0) == FAIL
)
17693 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
17694 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17695 _("invalid literal constant: pool needs to be closer"));
17697 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17698 _("bad immediate value for offset (%ld)"),
17703 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17704 newval
&= 0xff7ff000;
17705 newval
|= value
| (sign
? INDEX_UP
: 0);
17706 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17709 case BFD_RELOC_ARM_OFFSET_IMM8
:
17710 case BFD_RELOC_ARM_HWLITERAL
:
17716 if (validate_offset_imm (value
, 1) == FAIL
)
17718 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
17719 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17720 _("invalid literal constant: pool needs to be closer"));
17722 as_bad (_("bad immediate value for half-word offset (%ld)"),
17727 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17728 newval
&= 0xff7ff0f0;
17729 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
17730 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17733 case BFD_RELOC_ARM_T32_OFFSET_U8
:
17734 if (value
< 0 || value
> 1020 || value
% 4 != 0)
17735 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17736 _("bad immediate value for offset (%ld)"), (long) value
);
17739 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
17741 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
17744 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
17745 /* This is a complicated relocation used for all varieties of Thumb32
17746 load/store instruction with immediate offset:
17748 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17749 *4, optional writeback(W)
17750 (doubleword load/store)
17752 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17753 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17754 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17755 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17756 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17758 Uppercase letters indicate bits that are already encoded at
17759 this point. Lowercase letters are our problem. For the
17760 second block of instructions, the secondary opcode nybble
17761 (bits 8..11) is present, and bit 23 is zero, even if this is
17762 a PC-relative operation. */
17763 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17765 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
17767 if ((newval
& 0xf0000000) == 0xe0000000)
17769 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17771 newval
|= (1 << 23);
17774 if (value
% 4 != 0)
17776 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17777 _("offset not a multiple of 4"));
17783 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17784 _("offset out of range"));
17789 else if ((newval
& 0x000f0000) == 0x000f0000)
17791 /* PC-relative, 12-bit offset. */
17793 newval
|= (1 << 23);
17798 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17799 _("offset out of range"));
17804 else if ((newval
& 0x00000100) == 0x00000100)
17806 /* Writeback: 8-bit, +/- offset. */
17808 newval
|= (1 << 9);
17813 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17814 _("offset out of range"));
17819 else if ((newval
& 0x00000f00) == 0x00000e00)
17821 /* T-instruction: positive 8-bit offset. */
17822 if (value
< 0 || value
> 0xff)
17824 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17825 _("offset out of range"));
17833 /* Positive 12-bit or negative 8-bit offset. */
17837 newval
|= (1 << 23);
17847 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17848 _("offset out of range"));
17855 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
17856 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
17859 case BFD_RELOC_ARM_SHIFT_IMM
:
17860 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17861 if (((unsigned long) value
) > 32
17863 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
17865 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17866 _("shift expression is too large"));
17871 /* Shifts of zero must be done as lsl. */
17873 else if (value
== 32)
17875 newval
&= 0xfffff07f;
17876 newval
|= (value
& 0x1f) << 7;
17877 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17880 case BFD_RELOC_ARM_T32_IMMEDIATE
:
17881 case BFD_RELOC_ARM_T32_ADD_IMM
:
17882 case BFD_RELOC_ARM_T32_IMM12
:
17883 case BFD_RELOC_ARM_T32_ADD_PC12
:
17884 /* We claim that this fixup has been processed here,
17885 even if in fact we generate an error because we do
17886 not have a reloc for it, so tc_gen_reloc will reject it. */
17890 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17892 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17893 _("undefined symbol %s used as an immediate value"),
17894 S_GET_NAME (fixP
->fx_addsy
));
17898 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17900 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
17903 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
17904 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
17906 newimm
= encode_thumb32_immediate (value
);
17907 if (newimm
== (unsigned int) FAIL
)
17908 newimm
= thumb32_negate_data_op (&newval
, value
);
17910 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
17911 && newimm
== (unsigned int) FAIL
)
17913 /* Turn add/sum into addw/subw. */
17914 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
17915 newval
= (newval
& 0xfeffffff) | 0x02000000;
17917 /* 12 bit immediate for addw/subw. */
17921 newval
^= 0x00a00000;
17924 newimm
= (unsigned int) FAIL
;
17929 if (newimm
== (unsigned int)FAIL
)
17931 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17932 _("invalid constant (%lx) after fixup"),
17933 (unsigned long) value
);
17937 newval
|= (newimm
& 0x800) << 15;
17938 newval
|= (newimm
& 0x700) << 4;
17939 newval
|= (newimm
& 0x0ff);
17941 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
17942 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
17945 case BFD_RELOC_ARM_SMC
:
17946 if (((unsigned long) value
) > 0xffff)
17947 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17948 _("invalid smc expression"));
17949 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17950 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
17951 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17954 case BFD_RELOC_ARM_SWI
:
17955 if (fixP
->tc_fix_data
!= 0)
17957 if (((unsigned long) value
) > 0xff)
17958 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17959 _("invalid swi expression"));
17960 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17962 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17966 if (((unsigned long) value
) > 0x00ffffff)
17967 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17968 _("invalid swi expression"));
17969 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17971 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17975 case BFD_RELOC_ARM_MULTI
:
17976 if (((unsigned long) value
) > 0xffff)
17977 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17978 _("invalid expression in load/store multiple"));
17979 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
17980 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17984 case BFD_RELOC_ARM_PCREL_CALL
:
17985 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17986 if ((newval
& 0xf0000000) == 0xf0000000)
17990 goto arm_branch_common
;
17992 case BFD_RELOC_ARM_PCREL_JUMP
:
17993 case BFD_RELOC_ARM_PLT32
:
17995 case BFD_RELOC_ARM_PCREL_BRANCH
:
17997 goto arm_branch_common
;
17999 case BFD_RELOC_ARM_PCREL_BLX
:
18002 /* We are going to store value (shifted right by two) in the
18003 instruction, in a 24 bit, signed field. Bits 26 through 32 either
18004 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
18005 also be be clear. */
18007 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18008 _("misaligned branch destination"));
18009 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
18010 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
18011 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18012 _("branch out of range"));
18014 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18016 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18017 newval
|= (value
>> 2) & 0x00ffffff;
18018 /* Set the H bit on BLX instructions. */
18022 newval
|= 0x01000000;
18024 newval
&= ~0x01000000;
18026 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18030 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
18031 /* CBZ can only branch forward. */
18033 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18034 _("branch out of range"));
18036 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18038 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18039 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
18040 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18044 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
18045 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
18046 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18047 _("branch out of range"));
18049 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18051 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18052 newval
|= (value
& 0x1ff) >> 1;
18053 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18057 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
18058 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
18059 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18060 _("branch out of range"));
18062 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18064 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18065 newval
|= (value
& 0xfff) >> 1;
18066 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18070 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
18071 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
18072 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18073 _("conditional branch out of range"));
18075 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18078 addressT S
, J1
, J2
, lo
, hi
;
18080 S
= (value
& 0x00100000) >> 20;
18081 J2
= (value
& 0x00080000) >> 19;
18082 J1
= (value
& 0x00040000) >> 18;
18083 hi
= (value
& 0x0003f000) >> 12;
18084 lo
= (value
& 0x00000ffe) >> 1;
18086 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18087 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18088 newval
|= (S
<< 10) | hi
;
18089 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
18090 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18091 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18095 case BFD_RELOC_THUMB_PCREL_BLX
:
18096 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
18097 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
18098 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18099 _("branch out of range"));
18101 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
18102 /* For a BLX instruction, make sure that the relocation is rounded up
18103 to a word boundary. This follows the semantics of the instruction
18104 which specifies that bit 1 of the target address will come from bit
18105 1 of the base address. */
18106 value
= (value
+ 1) & ~ 1;
18108 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18112 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18113 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18114 newval
|= (value
& 0x7fffff) >> 12;
18115 newval2
|= (value
& 0xfff) >> 1;
18116 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18117 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18121 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
18122 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
18123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18124 _("branch out of range"));
18126 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18129 addressT S
, I1
, I2
, lo
, hi
;
18131 S
= (value
& 0x01000000) >> 24;
18132 I1
= (value
& 0x00800000) >> 23;
18133 I2
= (value
& 0x00400000) >> 22;
18134 hi
= (value
& 0x003ff000) >> 12;
18135 lo
= (value
& 0x00000ffe) >> 1;
18140 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18141 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18142 newval
|= (S
<< 10) | hi
;
18143 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
18144 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18145 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18150 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18151 md_number_to_chars (buf
, value
, 1);
18155 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18156 md_number_to_chars (buf
, value
, 2);
18160 case BFD_RELOC_ARM_TLS_GD32
:
18161 case BFD_RELOC_ARM_TLS_LE32
:
18162 case BFD_RELOC_ARM_TLS_IE32
:
18163 case BFD_RELOC_ARM_TLS_LDM32
:
18164 case BFD_RELOC_ARM_TLS_LDO32
:
18165 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
18168 case BFD_RELOC_ARM_GOT32
:
18169 case BFD_RELOC_ARM_GOTOFF
:
18170 case BFD_RELOC_ARM_TARGET2
:
18171 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18172 md_number_to_chars (buf
, 0, 4);
18176 case BFD_RELOC_RVA
:
18178 case BFD_RELOC_ARM_TARGET1
:
18179 case BFD_RELOC_ARM_ROSEGREL32
:
18180 case BFD_RELOC_ARM_SBREL32
:
18181 case BFD_RELOC_32_PCREL
:
18183 case BFD_RELOC_32_SECREL
:
18185 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18187 /* For WinCE we only do this for pcrel fixups. */
18188 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
18190 md_number_to_chars (buf
, value
, 4);
18194 case BFD_RELOC_ARM_PREL31
:
18195 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18197 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
18198 if ((value
^ (value
>> 1)) & 0x40000000)
18200 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18201 _("rel31 relocation overflow"));
18203 newval
|= value
& 0x7fffffff;
18204 md_number_to_chars (buf
, newval
, 4);
18209 case BFD_RELOC_ARM_CP_OFF_IMM
:
18210 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
18211 if (value
< -1023 || value
> 1023 || (value
& 3))
18212 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18213 _("co-processor offset out of range"));
18218 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
18219 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
18220 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18222 newval
= get_thumb32_insn (buf
);
18223 newval
&= 0xff7fff00;
18224 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
18225 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
18226 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
18227 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18229 put_thumb32_insn (buf
, newval
);
18232 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
18233 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
18234 if (value
< -255 || value
> 255)
18235 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18236 _("co-processor offset out of range"));
18238 goto cp_off_common
;
18240 case BFD_RELOC_ARM_THUMB_OFFSET
:
18241 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18242 /* Exactly what ranges, and where the offset is inserted depends
18243 on the type of instruction, we can establish this from the
18245 switch (newval
>> 12)
18247 case 4: /* PC load. */
18248 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18249 forced to zero for these loads; md_pcrel_from has already
18250 compensated for this. */
18252 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18253 _("invalid offset, target not word aligned (0x%08lX)"),
18254 (((unsigned long) fixP
->fx_frag
->fr_address
18255 + (unsigned long) fixP
->fx_where
) & ~3)
18256 + (unsigned long) value
);
18258 if (value
& ~0x3fc)
18259 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18260 _("invalid offset, value too big (0x%08lX)"),
18263 newval
|= value
>> 2;
18266 case 9: /* SP load/store. */
18267 if (value
& ~0x3fc)
18268 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18269 _("invalid offset, value too big (0x%08lX)"),
18271 newval
|= value
>> 2;
18274 case 6: /* Word load/store. */
18276 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18277 _("invalid offset, value too big (0x%08lX)"),
18279 newval
|= value
<< 4; /* 6 - 2. */
18282 case 7: /* Byte load/store. */
18284 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18285 _("invalid offset, value too big (0x%08lX)"),
18287 newval
|= value
<< 6;
18290 case 8: /* Halfword load/store. */
18292 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18293 _("invalid offset, value too big (0x%08lX)"),
18295 newval
|= value
<< 5; /* 6 - 1. */
18299 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18300 "Unable to process relocation for thumb opcode: %lx",
18301 (unsigned long) newval
);
18304 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18307 case BFD_RELOC_ARM_THUMB_ADD
:
18308 /* This is a complicated relocation, since we use it for all of
18309 the following immediate relocations:
18313 9bit ADD/SUB SP word-aligned
18314 10bit ADD PC/SP word-aligned
18316 The type of instruction being processed is encoded in the
18323 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18325 int rd
= (newval
>> 4) & 0xf;
18326 int rs
= newval
& 0xf;
18327 int subtract
= !!(newval
& 0x8000);
18329 /* Check for HI regs, only very restricted cases allowed:
18330 Adjusting SP, and using PC or SP to get an address. */
18331 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
18332 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
18333 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18334 _("invalid Hi register with immediate"));
18336 /* If value is negative, choose the opposite instruction. */
18340 subtract
= !subtract
;
18342 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18343 _("immediate value out of range"));
18348 if (value
& ~0x1fc)
18349 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18350 _("invalid immediate for stack address calculation"));
18351 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
18352 newval
|= value
>> 2;
18354 else if (rs
== REG_PC
|| rs
== REG_SP
)
18356 if (subtract
|| value
& ~0x3fc)
18357 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18358 _("invalid immediate for address calculation (value = 0x%08lX)"),
18359 (unsigned long) value
);
18360 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
18362 newval
|= value
>> 2;
18367 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18368 _("immediate value out of range"));
18369 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
18370 newval
|= (rd
<< 8) | value
;
18375 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18376 _("immediate value out of range"));
18377 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
18378 newval
|= rd
| (rs
<< 3) | (value
<< 6);
18381 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18384 case BFD_RELOC_ARM_THUMB_IMM
:
18385 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18386 if (value
< 0 || value
> 255)
18387 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18388 _("invalid immediate: %ld is too large"),
18391 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18394 case BFD_RELOC_ARM_THUMB_SHIFT
:
18395 /* 5bit shift value (0..32). LSL cannot take 32. */
18396 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
18397 temp
= newval
& 0xf800;
18398 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
18399 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18400 _("invalid shift value: %ld"), (long) value
);
18401 /* Shifts of zero must be encoded as LSL. */
18403 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
18404 /* Shifts of 32 are encoded as zero. */
18405 else if (value
== 32)
18407 newval
|= value
<< 6;
18408 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18411 case BFD_RELOC_VTABLE_INHERIT
:
18412 case BFD_RELOC_VTABLE_ENTRY
:
18416 case BFD_RELOC_ARM_MOVW
:
18417 case BFD_RELOC_ARM_MOVT
:
18418 case BFD_RELOC_ARM_THUMB_MOVW
:
18419 case BFD_RELOC_ARM_THUMB_MOVT
:
18420 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18422 /* REL format relocations are limited to a 16-bit addend. */
18423 if (!fixP
->fx_done
)
18425 if (value
< -0x1000 || value
> 0xffff)
18426 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18427 _("offset too big"));
18429 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
18430 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
18435 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
18436 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
18438 newval
= get_thumb32_insn (buf
);
18439 newval
&= 0xfbf08f00;
18440 newval
|= (value
& 0xf000) << 4;
18441 newval
|= (value
& 0x0800) << 15;
18442 newval
|= (value
& 0x0700) << 4;
18443 newval
|= (value
& 0x00ff);
18444 put_thumb32_insn (buf
, newval
);
18448 newval
= md_chars_to_number (buf
, 4);
18449 newval
&= 0xfff0f000;
18450 newval
|= value
& 0x0fff;
18451 newval
|= (value
& 0xf000) << 4;
18452 md_number_to_chars (buf
, newval
, 4);
18457 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
18458 case BFD_RELOC_ARM_ALU_PC_G0
:
18459 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
18460 case BFD_RELOC_ARM_ALU_PC_G1
:
18461 case BFD_RELOC_ARM_ALU_PC_G2
:
18462 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
18463 case BFD_RELOC_ARM_ALU_SB_G0
:
18464 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
18465 case BFD_RELOC_ARM_ALU_SB_G1
:
18466 case BFD_RELOC_ARM_ALU_SB_G2
:
18467 assert (!fixP
->fx_done
);
18468 if (!seg
->use_rela_p
)
18471 bfd_vma encoded_addend
;
18472 bfd_vma addend_abs
= abs (value
);
18474 /* Check that the absolute value of the addend can be
18475 expressed as an 8-bit constant plus a rotation. */
18476 encoded_addend
= encode_arm_immediate (addend_abs
);
18477 if (encoded_addend
== (unsigned int) FAIL
)
18478 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18479 _("the offset 0x%08lX is not representable"),
18482 /* Extract the instruction. */
18483 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18485 /* If the addend is positive, use an ADD instruction.
18486 Otherwise use a SUB. Take care not to destroy the S bit. */
18487 insn
&= 0xff1fffff;
18493 /* Place the encoded addend into the first 12 bits of the
18495 insn
&= 0xfffff000;
18496 insn
|= encoded_addend
;
18498 /* Update the instruction. */
18499 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18503 case BFD_RELOC_ARM_LDR_PC_G0
:
18504 case BFD_RELOC_ARM_LDR_PC_G1
:
18505 case BFD_RELOC_ARM_LDR_PC_G2
:
18506 case BFD_RELOC_ARM_LDR_SB_G0
:
18507 case BFD_RELOC_ARM_LDR_SB_G1
:
18508 case BFD_RELOC_ARM_LDR_SB_G2
:
18509 assert (!fixP
->fx_done
);
18510 if (!seg
->use_rela_p
)
18513 bfd_vma addend_abs
= abs (value
);
18515 /* Check that the absolute value of the addend can be
18516 encoded in 12 bits. */
18517 if (addend_abs
>= 0x1000)
18518 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18519 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18522 /* Extract the instruction. */
18523 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18525 /* If the addend is negative, clear bit 23 of the instruction.
18526 Otherwise set it. */
18528 insn
&= ~(1 << 23);
18532 /* Place the absolute value of the addend into the first 12 bits
18533 of the instruction. */
18534 insn
&= 0xfffff000;
18535 insn
|= addend_abs
;
18537 /* Update the instruction. */
18538 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18542 case BFD_RELOC_ARM_LDRS_PC_G0
:
18543 case BFD_RELOC_ARM_LDRS_PC_G1
:
18544 case BFD_RELOC_ARM_LDRS_PC_G2
:
18545 case BFD_RELOC_ARM_LDRS_SB_G0
:
18546 case BFD_RELOC_ARM_LDRS_SB_G1
:
18547 case BFD_RELOC_ARM_LDRS_SB_G2
:
18548 assert (!fixP
->fx_done
);
18549 if (!seg
->use_rela_p
)
18552 bfd_vma addend_abs
= abs (value
);
18554 /* Check that the absolute value of the addend can be
18555 encoded in 8 bits. */
18556 if (addend_abs
>= 0x100)
18557 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18558 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18561 /* Extract the instruction. */
18562 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18564 /* If the addend is negative, clear bit 23 of the instruction.
18565 Otherwise set it. */
18567 insn
&= ~(1 << 23);
18571 /* Place the first four bits of the absolute value of the addend
18572 into the first 4 bits of the instruction, and the remaining
18573 four into bits 8 .. 11. */
18574 insn
&= 0xfffff0f0;
18575 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
18577 /* Update the instruction. */
18578 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18582 case BFD_RELOC_ARM_LDC_PC_G0
:
18583 case BFD_RELOC_ARM_LDC_PC_G1
:
18584 case BFD_RELOC_ARM_LDC_PC_G2
:
18585 case BFD_RELOC_ARM_LDC_SB_G0
:
18586 case BFD_RELOC_ARM_LDC_SB_G1
:
18587 case BFD_RELOC_ARM_LDC_SB_G2
:
18588 assert (!fixP
->fx_done
);
18589 if (!seg
->use_rela_p
)
18592 bfd_vma addend_abs
= abs (value
);
18594 /* Check that the absolute value of the addend is a multiple of
18595 four and, when divided by four, fits in 8 bits. */
18596 if (addend_abs
& 0x3)
18597 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18598 _("bad offset 0x%08lX (must be word-aligned)"),
18601 if ((addend_abs
>> 2) > 0xff)
18602 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18603 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18606 /* Extract the instruction. */
18607 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18609 /* If the addend is negative, clear bit 23 of the instruction.
18610 Otherwise set it. */
18612 insn
&= ~(1 << 23);
18616 /* Place the addend (divided by four) into the first eight
18617 bits of the instruction. */
18618 insn
&= 0xfffffff0;
18619 insn
|= addend_abs
>> 2;
18621 /* Update the instruction. */
18622 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18626 case BFD_RELOC_UNUSED
:
18628 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18629 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
18633 /* Translate internal representation of relocation info to BFD target
18637 tc_gen_reloc (asection
*section
, fixS
*fixp
)
18640 bfd_reloc_code_real_type code
;
18642 reloc
= xmalloc (sizeof (arelent
));
18644 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
18645 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
18646 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
18648 if (fixp
->fx_pcrel
)
18650 if (section
->use_rela_p
)
18651 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
18653 fixp
->fx_offset
= reloc
->address
;
18655 reloc
->addend
= fixp
->fx_offset
;
18657 switch (fixp
->fx_r_type
)
18660 if (fixp
->fx_pcrel
)
18662 code
= BFD_RELOC_8_PCREL
;
18667 if (fixp
->fx_pcrel
)
18669 code
= BFD_RELOC_16_PCREL
;
18674 if (fixp
->fx_pcrel
)
18676 code
= BFD_RELOC_32_PCREL
;
18680 case BFD_RELOC_ARM_MOVW
:
18681 if (fixp
->fx_pcrel
)
18683 code
= BFD_RELOC_ARM_MOVW_PCREL
;
18687 case BFD_RELOC_ARM_MOVT
:
18688 if (fixp
->fx_pcrel
)
18690 code
= BFD_RELOC_ARM_MOVT_PCREL
;
18694 case BFD_RELOC_ARM_THUMB_MOVW
:
18695 if (fixp
->fx_pcrel
)
18697 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
18701 case BFD_RELOC_ARM_THUMB_MOVT
:
18702 if (fixp
->fx_pcrel
)
18704 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
18708 case BFD_RELOC_NONE
:
18709 case BFD_RELOC_ARM_PCREL_BRANCH
:
18710 case BFD_RELOC_ARM_PCREL_BLX
:
18711 case BFD_RELOC_RVA
:
18712 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
18713 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
18714 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
18715 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
18716 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
18717 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
18718 case BFD_RELOC_THUMB_PCREL_BLX
:
18719 case BFD_RELOC_VTABLE_ENTRY
:
18720 case BFD_RELOC_VTABLE_INHERIT
:
18722 case BFD_RELOC_32_SECREL
:
18724 code
= fixp
->fx_r_type
;
18727 case BFD_RELOC_ARM_LITERAL
:
18728 case BFD_RELOC_ARM_HWLITERAL
:
18729 /* If this is called then the a literal has
18730 been referenced across a section boundary. */
18731 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18732 _("literal referenced across section boundary"));
18736 case BFD_RELOC_ARM_GOT32
:
18737 case BFD_RELOC_ARM_GOTOFF
:
18738 case BFD_RELOC_ARM_PLT32
:
18739 case BFD_RELOC_ARM_TARGET1
:
18740 case BFD_RELOC_ARM_ROSEGREL32
:
18741 case BFD_RELOC_ARM_SBREL32
:
18742 case BFD_RELOC_ARM_PREL31
:
18743 case BFD_RELOC_ARM_TARGET2
:
18744 case BFD_RELOC_ARM_TLS_LE32
:
18745 case BFD_RELOC_ARM_TLS_LDO32
:
18746 case BFD_RELOC_ARM_PCREL_CALL
:
18747 case BFD_RELOC_ARM_PCREL_JUMP
:
18748 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
18749 case BFD_RELOC_ARM_ALU_PC_G0
:
18750 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
18751 case BFD_RELOC_ARM_ALU_PC_G1
:
18752 case BFD_RELOC_ARM_ALU_PC_G2
:
18753 case BFD_RELOC_ARM_LDR_PC_G0
:
18754 case BFD_RELOC_ARM_LDR_PC_G1
:
18755 case BFD_RELOC_ARM_LDR_PC_G2
:
18756 case BFD_RELOC_ARM_LDRS_PC_G0
:
18757 case BFD_RELOC_ARM_LDRS_PC_G1
:
18758 case BFD_RELOC_ARM_LDRS_PC_G2
:
18759 case BFD_RELOC_ARM_LDC_PC_G0
:
18760 case BFD_RELOC_ARM_LDC_PC_G1
:
18761 case BFD_RELOC_ARM_LDC_PC_G2
:
18762 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
18763 case BFD_RELOC_ARM_ALU_SB_G0
:
18764 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
18765 case BFD_RELOC_ARM_ALU_SB_G1
:
18766 case BFD_RELOC_ARM_ALU_SB_G2
:
18767 case BFD_RELOC_ARM_LDR_SB_G0
:
18768 case BFD_RELOC_ARM_LDR_SB_G1
:
18769 case BFD_RELOC_ARM_LDR_SB_G2
:
18770 case BFD_RELOC_ARM_LDRS_SB_G0
:
18771 case BFD_RELOC_ARM_LDRS_SB_G1
:
18772 case BFD_RELOC_ARM_LDRS_SB_G2
:
18773 case BFD_RELOC_ARM_LDC_SB_G0
:
18774 case BFD_RELOC_ARM_LDC_SB_G1
:
18775 case BFD_RELOC_ARM_LDC_SB_G2
:
18776 code
= fixp
->fx_r_type
;
18779 case BFD_RELOC_ARM_TLS_GD32
:
18780 case BFD_RELOC_ARM_TLS_IE32
:
18781 case BFD_RELOC_ARM_TLS_LDM32
:
18782 /* BFD will include the symbol's address in the addend.
18783 But we don't want that, so subtract it out again here. */
18784 if (!S_IS_COMMON (fixp
->fx_addsy
))
18785 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
18786 code
= fixp
->fx_r_type
;
18790 case BFD_RELOC_ARM_IMMEDIATE
:
18791 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18792 _("internal relocation (type: IMMEDIATE) not fixed up"));
18795 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
18796 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18797 _("ADRL used for a symbol not defined in the same file"));
18800 case BFD_RELOC_ARM_OFFSET_IMM
:
18801 if (section
->use_rela_p
)
18803 code
= fixp
->fx_r_type
;
18807 if (fixp
->fx_addsy
!= NULL
18808 && !S_IS_DEFINED (fixp
->fx_addsy
)
18809 && S_IS_LOCAL (fixp
->fx_addsy
))
18811 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18812 _("undefined local label `%s'"),
18813 S_GET_NAME (fixp
->fx_addsy
));
18817 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18818 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18825 switch (fixp
->fx_r_type
)
18827 case BFD_RELOC_NONE
: type
= "NONE"; break;
18828 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
18829 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
18830 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
18831 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
18832 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
18833 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
18834 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
18835 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
18836 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
18837 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
18838 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
18839 default: type
= _("<unknown>"); break;
18841 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18842 _("cannot represent %s relocation in this object file format"),
18849 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
18851 && fixp
->fx_addsy
== GOT_symbol
)
18853 code
= BFD_RELOC_ARM_GOTPC
;
18854 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
18858 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
18860 if (reloc
->howto
== NULL
)
18862 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18863 _("cannot represent %s relocation in this object file format"),
18864 bfd_get_reloc_code_name (code
));
18868 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18869 vtable entry to be used in the relocation's section offset. */
18870 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
18871 reloc
->address
= fixp
->fx_offset
;
18876 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18879 cons_fix_new_arm (fragS
* frag
,
18884 bfd_reloc_code_real_type type
;
18888 FIXME: @@ Should look at CPU word size. */
18892 type
= BFD_RELOC_8
;
18895 type
= BFD_RELOC_16
;
18899 type
= BFD_RELOC_32
;
18902 type
= BFD_RELOC_64
;
18907 if (exp
->X_op
== O_secrel
)
18909 exp
->X_op
= O_symbol
;
18910 type
= BFD_RELOC_32_SECREL
;
18914 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
#if defined OBJ_COFF || defined OBJ_ELF
/* Redirect Thumb BL fixups aimed at ARM-only functions to the Thumb
   entry stub created by find_real_start.  */

void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif
18936 arm_force_relocation (struct fix
* fixp
)
18938 #if defined (OBJ_COFF) && defined (TE_PE)
18939 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
18943 /* Resolve these relocations even if the symbol is extern or weak. */
18944 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
18945 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
18946 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
18947 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
18948 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
18949 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
18950 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
18953 /* Always leave these relocations for the linker. */
18954 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
18955 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
18956 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
18959 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.	 */

int
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.	 */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19020 elf32_arm_target_format (void)
19023 return (target_big_endian
19024 ? "elf32-bigarm-symbian"
19025 : "elf32-littlearm-symbian");
19026 #elif defined (TE_VXWORKS)
19027 return (target_big_endian
19028 ? "elf32-bigarm-vxworks"
19029 : "elf32-littlearm-vxworks");
19031 if (target_big_endian
)
19032 return "elf32-bigarm";
19034 return "elf32-littlearm";
19039 armelf_frob_symbol (symbolS
* symp
,
19042 elf_frob_symbol (symp
, puntp
);
19046 /* MD interface: Finalization. */
19048 /* A good place to do this, although this was probably not intended
19049 for this kind of use. We need to dump the literal pool before
19050 references are made to a null symbol pointer. */
19055 literal_pool
* pool
;
19057 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
19059 /* Put it at the end of the relevent section. */
19060 subseg_set (pool
->section
, pool
->sub_section
);
19062 arm_elf_change_section ();
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) must keep their plain type.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
19145 /* MD interface: Initialization. */
19148 set_constant_flonums (void)
19152 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
19153 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
19157 /* Auto-select Thumb mode if it's the only available instruction set for the
19158 given architecture. */
19161 autoselect_thumb_from_cpu_variant (void)
19163 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
19164 opcode_select (16);
19173 if ( (arm_ops_hsh
= hash_new ()) == NULL
19174 || (arm_cond_hsh
= hash_new ()) == NULL
19175 || (arm_shift_hsh
= hash_new ()) == NULL
19176 || (arm_psr_hsh
= hash_new ()) == NULL
19177 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
19178 || (arm_reg_hsh
= hash_new ()) == NULL
19179 || (arm_reloc_hsh
= hash_new ()) == NULL
19180 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
19181 as_fatal (_("virtual memory exhausted"));
19183 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
19184 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
19185 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
19186 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
19187 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
19188 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
19189 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
19190 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
19191 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
19192 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
19193 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
19194 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
19196 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
19198 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
19199 (PTR
) (barrier_opt_names
+ i
));
19201 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
19202 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
19205 set_constant_flonums ();
19207 /* Set the cpu variant based on the command-line options. We prefer
19208 -mcpu= over -march= if both are set (as for GCC); and we prefer
19209 -mfpu= over any other way of setting the floating point unit.
19210 Use of legacy options with new options are faulted. */
19213 if (mcpu_cpu_opt
|| march_cpu_opt
)
19214 as_bad (_("use of old and new-style options to set CPU type"));
19216 mcpu_cpu_opt
= legacy_cpu
;
19218 else if (!mcpu_cpu_opt
)
19219 mcpu_cpu_opt
= march_cpu_opt
;
19224 as_bad (_("use of old and new-style options to set FPU type"));
19226 mfpu_opt
= legacy_fpu
;
19228 else if (!mfpu_opt
)
19230 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19231 /* Some environments specify a default FPU. If they don't, infer it
19232 from the processor. */
19234 mfpu_opt
= mcpu_fpu_opt
;
19236 mfpu_opt
= march_fpu_opt
;
19238 mfpu_opt
= &fpu_default
;
19245 mfpu_opt
= &fpu_default
;
19246 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
19247 mfpu_opt
= &fpu_arch_vfp_v2
;
19249 mfpu_opt
= &fpu_arch_fpa
;
19255 mcpu_cpu_opt
= &cpu_default
;
19256 selected_cpu
= cpu_default
;
19260 selected_cpu
= *mcpu_cpu_opt
;
19262 mcpu_cpu_opt
= &arm_arch_any
;
19265 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
19267 autoselect_thumb_from_cpu_variant ();
19269 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
19271 #if defined OBJ_COFF || defined OBJ_ELF
19273 unsigned int flags
= 0;
19275 #if defined OBJ_ELF
19276 flags
= meabi_flags
;
19278 switch (meabi_flags
)
19280 case EF_ARM_EABI_UNKNOWN
:
19282 /* Set the flags in the private structure. */
19283 if (uses_apcs_26
) flags
|= F_APCS26
;
19284 if (support_interwork
) flags
|= F_INTERWORK
;
19285 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
19286 if (pic_code
) flags
|= F_PIC
;
19287 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
19288 flags
|= F_SOFT_FLOAT
;
19290 switch (mfloat_abi_opt
)
19292 case ARM_FLOAT_ABI_SOFT
:
19293 case ARM_FLOAT_ABI_SOFTFP
:
19294 flags
|= F_SOFT_FLOAT
;
19297 case ARM_FLOAT_ABI_HARD
:
19298 if (flags
& F_SOFT_FLOAT
)
19299 as_bad (_("hard-float conflicts with specified fpu"));
19303 /* Using pure-endian doubles (even if soft-float). */
19304 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
19305 flags
|= F_VFP_FLOAT
;
19307 #if defined OBJ_ELF
19308 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
19309 flags
|= EF_ARM_MAVERICK_FLOAT
;
19312 case EF_ARM_EABI_VER4
:
19313 case EF_ARM_EABI_VER5
:
19314 /* No additional flags to set. */
19321 bfd_set_private_flags (stdoutput
, flags
);
19323 /* We have run out flags in the COFF header to encode the
19324 status of ATPCS support, so instead we create a dummy,
19325 empty, debug section called .arm.atpcs. */
19330 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
19334 bfd_set_section_flags
19335 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
19336 bfd_set_section_size (stdoutput
, sec
, 0);
19337 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
19343 /* Record the CPU type as well. */
19344 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
19345 mach
= bfd_mach_arm_iWMMXt2
;
19346 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
19347 mach
= bfd_mach_arm_iWMMXt
;
19348 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
19349 mach
= bfd_mach_arm_XScale
;
19350 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
19351 mach
= bfd_mach_arm_ep9312
;
19352 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
19353 mach
= bfd_mach_arm_5TE
;
19354 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
19356 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
19357 mach
= bfd_mach_arm_5T
;
19359 mach
= bfd_mach_arm_5
;
19361 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
19363 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
19364 mach
= bfd_mach_arm_4T
;
19366 mach
= bfd_mach_arm_4
;
19368 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
19369 mach
= bfd_mach_arm_3M
;
19370 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
19371 mach
= bfd_mach_arm_3
;
19372 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
19373 mach
= bfd_mach_arm_2a
;
19374 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
19375 mach
= bfd_mach_arm_2
;
19377 mach
= bfd_mach_arm_unknown
;
19379 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
19382 /* Command line processing. */
19385 Invocation line includes a switch not recognized by the base assembler.
19386 See if it's a processor-specific option.
19388 This routine is somewhat complicated by the need for backwards
19389 compatibility (since older releases of gcc can't be changed).
19390 The new options try to make the interface as compatible as
19393 New options (supported) are:
19395 -mcpu=<cpu name> Assemble for selected processor
19396 -march=<architecture name> Assemble for selected architecture
19397 -mfpu=<fpu architecture> Assemble for selected FPU.
19398 -EB/-mbig-endian Big-endian
19399 -EL/-mlittle-endian Little-endian
19400 -k Generate PIC code
19401 -mthumb Start in Thumb mode
19402 -mthumb-interwork Code supports ARM/Thumb interworking
19404 For now we will also provide support for:
19406 -mapcs-32 32-bit Program counter
19407 -mapcs-26 26-bit Program counter
19408 -macps-float Floats passed in FP registers
19409 -mapcs-reentrant Reentrant code
19411 (sometime these will probably be replaced with -mapcs=<list of options>
19412 and -matpcs=<list of options>)
19414 The remaining options are only supported for back-wards compatibility.
19415 Cpu variants, the arm part is optional:
19416 -m[arm]1 Currently not supported.
19417 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19418 -m[arm]3 Arm 3 processor
19419 -m[arm]6[xx], Arm 6 processors
19420 -m[arm]7[xx][t][[d]m] Arm 7 processors
19421 -m[arm]8[10] Arm 8 processors
19422 -m[arm]9[20][tdmi] Arm 9 processors
19423 -mstrongarm[110[0]] StrongARM processors
19424 -mxscale XScale processors
19425 -m[arm]v[2345[t[e]]] Arm architectures
19426 -mall All (except the ARM1)
19428 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19429 -mfpe-old (No float load/store multiples)
19430 -mvfpxd VFP Single precision
19432 -mno-fpu Disable all floating point instructions
19434 The following CPU names are recognized:
19435 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19436 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19437 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
19438 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19439 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19440 arm10t arm10e, arm1020t, arm1020e, arm10200e,
19441 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19445 const char * md_shortopts
= "m:k";
19447 #ifdef ARM_BI_ENDIAN
19448 #define OPTION_EB (OPTION_MD_BASE + 0)
19449 #define OPTION_EL (OPTION_MD_BASE + 1)
19451 #if TARGET_BYTES_BIG_ENDIAN
19452 #define OPTION_EB (OPTION_MD_BASE + 0)
19454 #define OPTION_EL (OPTION_MD_BASE + 1)
19458 struct option md_longopts
[] =
19461 {"EB", no_argument
, NULL
, OPTION_EB
},
19464 {"EL", no_argument
, NULL
, OPTION_EL
},
19466 {NULL
, no_argument
, NULL
, 0}
19469 size_t md_longopts_size
= sizeof (md_longopts
);
/* Table entry describing a simple boolean/int command-line option.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
19480 struct arm_option_table arm_opts
[] =
19482 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
19483 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
19484 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19485 &support_interwork
, 1, NULL
},
19486 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
19487 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
19488 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
19490 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
19491 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
19492 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
19493 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
19496 /* These are recognized by the assembler, but have no affect on code. */
19497 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
19498 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
19499 {NULL
, NULL
, NULL
, 0, NULL
}
19502 struct arm_legacy_option_table
19504 char *option
; /* Option name to match. */
19505 const arm_feature_set
**var
; /* Variable to change. */
19506 const arm_feature_set value
; /* What to change it to. */
19507 char *deprecated
; /* If non-null, print this message. */
19510 const struct arm_legacy_option_table arm_legacy_opts
[] =
19512 /* DON'T add any new processors to this list -- we want the whole list
19513 to go away... Add them to the processors table instead. */
19514 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
19515 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
19516 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
19517 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
19518 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
19519 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
19520 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
19521 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
19522 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
19523 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
19524 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
19525 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
19526 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
19527 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
19528 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
19529 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
19530 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
19531 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
19532 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
19533 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
19534 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
19535 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
19536 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
19537 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
19538 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
19539 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
19540 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
19541 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
19542 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
19543 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
19544 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
19545 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
19546 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
19547 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
19548 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
19549 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
19550 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
19551 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
19552 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
19553 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
19554 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
19555 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
19556 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
19557 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
19558 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
19559 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
19560 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
19561 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
19562 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
19563 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
19564 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
19565 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
19566 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
19567 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
19568 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
19569 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
19570 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
19571 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
19572 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
19573 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
19574 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
19575 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
19576 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
19577 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
19578 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
19579 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
19580 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
19581 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
19582 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
19583 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
19584 N_("use -mcpu=strongarm110")},
19585 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
19586 N_("use -mcpu=strongarm1100")},
19587 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
19588 N_("use -mcpu=strongarm1110")},
19589 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
19590 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
19591 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
19593 /* Architecture variants -- don't add any more to this list either. */
19594 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
19595 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
19596 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
19597 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
19598 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
19599 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
19600 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
19601 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
19602 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
19603 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
19604 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
19605 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
19606 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
19607 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
19608 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
19609 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
19610 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
19611 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
19613 /* Floating point variants -- don't add any more to this list either. */
19614 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
19615 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
19616 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
19617 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
19618 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19620 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
19623 struct arm_cpu_option_table
19626 const arm_feature_set value
;
19627 /* For some CPUs we assume an FPU unless the user explicitly sets
19629 const arm_feature_set default_fpu
;
19630 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19632 const char *canonical_name
;
19635 /* This list should, at a minimum, contain all the cpu names
19636 recognized by GCC. */
19637 static const struct arm_cpu_option_table arm_cpus
[] =
19639 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
19640 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
19641 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
19642 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
19643 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
19644 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19645 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19646 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19647 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19648 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19649 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19650 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
19651 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19652 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
19653 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19654 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
19655 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19656 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19657 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19658 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19659 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19660 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19661 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19662 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19663 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19664 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19665 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19666 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
19667 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19668 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19669 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19670 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19671 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19672 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19673 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19674 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19675 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19676 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
19677 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19678 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
19679 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19680 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19681 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19682 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
19683 /* For V5 or later processors we default to using VFP; but the user
19684 should really set the FPU type explicitly. */
19685 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
19686 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19687 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
19688 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
19689 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
19690 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
19691 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
19692 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19693 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
19694 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
19695 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19696 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19697 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
19698 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
19699 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19700 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
19701 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
19702 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19703 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
19704 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
19705 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
19706 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
19707 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
19708 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
19709 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
19710 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
19711 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
19712 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
19713 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
19714 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
19715 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
19716 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
19717 | FPU_NEON_EXT_V1
),
19719 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
19720 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
19721 /* ??? XSCALE is really an architecture. */
19722 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
19723 /* ??? iwmmxt is not a processor. */
19724 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
19725 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
},
19726 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
19728 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
19729 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
19732 struct arm_arch_option_table
19735 const arm_feature_set value
;
19736 const arm_feature_set default_fpu
;
19739 /* This list should, at a minimum, contain all the architecture names
19740 recognized by GCC. */
19741 static const struct arm_arch_option_table arm_archs
[] =
19743 {"all", ARM_ANY
, FPU_ARCH_FPA
},
19744 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
19745 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
19746 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
19747 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
19748 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
19749 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
19750 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
19751 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
19752 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
19753 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
19754 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
19755 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
19756 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
19757 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
19758 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
19759 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
19760 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
19761 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
19762 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
19763 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
19764 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
19765 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
19766 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
19767 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
19768 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
19769 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
19770 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
19771 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
19772 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
19773 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
19774 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
19775 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
},
19776 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
19779 /* ISA extensions in the co-processor space. */
19780 struct arm_option_cpu_value_table
19783 const arm_feature_set value
;
19786 static const struct arm_option_cpu_value_table arm_extensions
[] =
19788 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
19789 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
19790 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
19791 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2
)},
19792 {NULL
, ARM_ARCH_NONE
}
19795 /* This list should, at a minimum, contain all the fpu names
19796 recognized by GCC. */
19797 static const struct arm_option_cpu_value_table arm_fpus
[] =
19799 {"softfpa", FPU_NONE
},
19800 {"fpe", FPU_ARCH_FPE
},
19801 {"fpe2", FPU_ARCH_FPE
},
19802 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
19803 {"fpa", FPU_ARCH_FPA
},
19804 {"fpa10", FPU_ARCH_FPA
},
19805 {"fpa11", FPU_ARCH_FPA
},
19806 {"arm7500fe", FPU_ARCH_FPA
},
19807 {"softvfp", FPU_ARCH_VFP
},
19808 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
19809 {"vfp", FPU_ARCH_VFP_V2
},
19810 {"vfp9", FPU_ARCH_VFP_V2
},
19811 {"vfp3", FPU_ARCH_VFP_V3
},
19812 {"vfp10", FPU_ARCH_VFP_V2
},
19813 {"vfp10-r0", FPU_ARCH_VFP_V1
},
19814 {"vfpxd", FPU_ARCH_VFP_V1xD
},
19815 {"arm1020t", FPU_ARCH_VFP_V1
},
19816 {"arm1020e", FPU_ARCH_VFP_V2
},
19817 {"arm1136jfs", FPU_ARCH_VFP_V2
},
19818 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
19819 {"maverick", FPU_ARCH_MAVERICK
},
19820 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
19821 {NULL
, ARM_ARCH_NONE
}
/* Simple name -> integer value option table (float ABI, EABI version).
   NOTE(review): the member declarations were dropped by extraction;
   restored per upstream tc-arm.c — confirm against the repository.  */
struct arm_option_value_table
{
  char *name;	/* Option name to match exactly.  */
  int value;	/* Value selected when the name matches.  */
};
19830 static const struct arm_option_value_table arm_float_abis
[] =
19832 {"hard", ARM_FLOAT_ABI_HARD
},
19833 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
19834 {"soft", ARM_FLOAT_ABI_SOFT
},
19839 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19840 static const struct arm_option_value_table arm_eabis
[] =
19842 {"gnu", EF_ARM_EABI_UNKNOWN
},
19843 {"4", EF_ARM_EABI_VER4
},
19844 {"5", EF_ARM_EABI_VER5
},
/* Table entry for long (multi-character) command-line options such as
   -mcpu=, matched by prefix and dispatched to a sub-option parser.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
19858 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
19860 arm_feature_set
*ext_set
= xmalloc (sizeof (arm_feature_set
));
19862 /* Copy the feature set, so that we can modify it. */
19863 *ext_set
= **opt_p
;
19866 while (str
!= NULL
&& *str
!= 0)
19868 const struct arm_option_cpu_value_table
* opt
;
19874 as_bad (_("invalid architectural extension"));
19879 ext
= strchr (str
, '+');
19882 optlen
= ext
- str
;
19884 optlen
= strlen (str
);
19888 as_bad (_("missing architectural extension"));
19892 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
19893 if (strncmp (opt
->name
, str
, optlen
) == 0)
19895 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
19899 if (opt
->name
== NULL
)
19901 as_bad (_("unknown architectural extnsion `%s'"), str
);
19912 arm_parse_cpu (char * str
)
19914 const struct arm_cpu_option_table
* opt
;
19915 char * ext
= strchr (str
, '+');
19919 optlen
= ext
- str
;
19921 optlen
= strlen (str
);
19925 as_bad (_("missing cpu name `%s'"), str
);
19929 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
19930 if (strncmp (opt
->name
, str
, optlen
) == 0)
19932 mcpu_cpu_opt
= &opt
->value
;
19933 mcpu_fpu_opt
= &opt
->default_fpu
;
19934 if (opt
->canonical_name
)
19935 strcpy(selected_cpu_name
, opt
->canonical_name
);
19939 for (i
= 0; i
< optlen
; i
++)
19940 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
19941 selected_cpu_name
[i
] = 0;
19945 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
19950 as_bad (_("unknown cpu `%s'"), str
);
19955 arm_parse_arch (char * str
)
19957 const struct arm_arch_option_table
*opt
;
19958 char *ext
= strchr (str
, '+');
19962 optlen
= ext
- str
;
19964 optlen
= strlen (str
);
19968 as_bad (_("missing architecture name `%s'"), str
);
19972 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
19973 if (streq (opt
->name
, str
))
19975 march_cpu_opt
= &opt
->value
;
19976 march_fpu_opt
= &opt
->default_fpu
;
19977 strcpy(selected_cpu_name
, opt
->name
);
19980 return arm_parse_extension (ext
, &march_cpu_opt
);
19985 as_bad (_("unknown architecture `%s'\n"), str
);
19990 arm_parse_fpu (char * str
)
19992 const struct arm_option_cpu_value_table
* opt
;
19994 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
19995 if (streq (opt
->name
, str
))
19997 mfpu_opt
= &opt
->value
;
20001 as_bad (_("unknown floating point format `%s'\n"), str
);
20006 arm_parse_float_abi (char * str
)
20008 const struct arm_option_value_table
* opt
;
20010 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
20011 if (streq (opt
->name
, str
))
20013 mfloat_abi_opt
= opt
->value
;
20017 as_bad (_("unknown floating point abi `%s'\n"), str
);
20023 arm_parse_eabi (char * str
)
20025 const struct arm_option_value_table
*opt
;
20027 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
20028 if (streq (opt
->name
, str
))
20030 meabi_flags
= opt
->value
;
20033 as_bad (_("unknown EABI `%s'\n"), str
);
20038 struct arm_long_option_table arm_long_opts
[] =
20040 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20041 arm_parse_cpu
, NULL
},
20042 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20043 arm_parse_arch
, NULL
},
20044 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20045 arm_parse_fpu
, NULL
},
20046 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20047 arm_parse_float_abi
, NULL
},
20049 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20050 arm_parse_eabi
, NULL
},
20052 {NULL
, NULL
, 0, NULL
}
20056 md_parse_option (int c
, char * arg
)
20058 struct arm_option_table
*opt
;
20059 const struct arm_legacy_option_table
*fopt
;
20060 struct arm_long_option_table
*lopt
;
20066 target_big_endian
= 1;
20072 target_big_endian
= 0;
20077 /* Listing option. Just ignore these, we don't support additional
20082 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
20084 if (c
== opt
->option
[0]
20085 && ((arg
== NULL
&& opt
->option
[1] == 0)
20086 || streq (arg
, opt
->option
+ 1)))
20088 #if WARN_DEPRECATED
20089 /* If the option is deprecated, tell the user. */
20090 if (opt
->deprecated
!= NULL
)
20091 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
20092 arg
? arg
: "", _(opt
->deprecated
));
20095 if (opt
->var
!= NULL
)
20096 *opt
->var
= opt
->value
;
20102 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
20104 if (c
== fopt
->option
[0]
20105 && ((arg
== NULL
&& fopt
->option
[1] == 0)
20106 || streq (arg
, fopt
->option
+ 1)))
20108 #if WARN_DEPRECATED
20109 /* If the option is deprecated, tell the user. */
20110 if (fopt
->deprecated
!= NULL
)
20111 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
20112 arg
? arg
: "", _(fopt
->deprecated
));
20115 if (fopt
->var
!= NULL
)
20116 *fopt
->var
= &fopt
->value
;
20122 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
20124 /* These options are expected to have an argument. */
20125 if (c
== lopt
->option
[0]
20127 && strncmp (arg
, lopt
->option
+ 1,
20128 strlen (lopt
->option
+ 1)) == 0)
20130 #if WARN_DEPRECATED
20131 /* If the option is deprecated, tell the user. */
20132 if (lopt
->deprecated
!= NULL
)
20133 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
20134 _(lopt
->deprecated
));
20137 /* Call the sup-option parser. */
20138 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
20149 md_show_usage (FILE * fp
)
20151 struct arm_option_table
*opt
;
20152 struct arm_long_option_table
*lopt
;
20154 fprintf (fp
, _(" ARM-specific assembler options:\n"));
20156 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
20157 if (opt
->help
!= NULL
)
20158 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
20160 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
20161 if (lopt
->help
!= NULL
)
20162 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
20166 -EB assemble code for a big-endian cpu\n"));
20171 -EL assemble code for a little-endian cpu\n"));
20180 arm_feature_set flags
;
20181 } cpu_arch_ver_table
;
20183 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
20184 least features first. */
20185 static const cpu_arch_ver_table cpu_arch_ver
[] =
20190 {4, ARM_ARCH_V5TE
},
20191 {5, ARM_ARCH_V5TEJ
},
20195 {9, ARM_ARCH_V6T2
},
20196 {10, ARM_ARCH_V7A
},
20197 {10, ARM_ARCH_V7R
},
20198 {10, ARM_ARCH_V7M
},
20202 /* Set the public EABI object attributes. */
20204 aeabi_set_public_attributes (void)
20207 arm_feature_set flags
;
20208 arm_feature_set tmp
;
20209 const cpu_arch_ver_table
*p
;
20211 /* Choose the architecture based on the capabilities of the requested cpu
20212 (if any) and/or the instructions actually used. */
20213 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
20214 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
20215 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
20216 /*Allow the user to override the reported architecture. */
20219 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
20220 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
20225 for (p
= cpu_arch_ver
; p
->val
; p
++)
20227 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
20230 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
20234 /* Tag_CPU_name. */
20235 if (selected_cpu_name
[0])
20239 p
= selected_cpu_name
;
20240 if (strncmp(p
, "armv", 4) == 0)
20245 for (i
= 0; p
[i
]; i
++)
20246 p
[i
] = TOUPPER (p
[i
]);
20248 elf32_arm_add_eabi_attr_string (stdoutput
, 5, p
);
20250 /* Tag_CPU_arch. */
20251 elf32_arm_add_eabi_attr_int (stdoutput
, 6, arch
);
20252 /* Tag_CPU_arch_profile. */
20253 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
20254 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'A');
20255 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
20256 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'R');
20257 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
))
20258 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'M');
20259 /* Tag_ARM_ISA_use. */
20260 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_full
))
20261 elf32_arm_add_eabi_attr_int (stdoutput
, 8, 1);
20262 /* Tag_THUMB_ISA_use. */
20263 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_full
))
20264 elf32_arm_add_eabi_attr_int (stdoutput
, 9,
20265 ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
) ? 2 : 1);
20266 /* Tag_VFP_arch. */
20267 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v3
)
20268 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v3
))
20269 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 3);
20270 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v2
)
20271 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v2
))
20272 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 2);
20273 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1
)
20274 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1
)
20275 || ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1xd
)
20276 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1xd
))
20277 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 1);
20278 /* Tag_WMMX_arch. */
20279 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_cext_iwmmxt
)
20280 || ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_cext_iwmmxt
))
20281 elf32_arm_add_eabi_attr_int (stdoutput
, 11, 1);
20282 /* Tag_NEON_arch. */
20283 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_neon_ext_v1
)
20284 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_neon_ext_v1
))
20285 elf32_arm_add_eabi_attr_int (stdoutput
, 12, 1);
20288 /* Add the .ARM.attributes section. */
20297 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
20300 aeabi_set_public_attributes ();
20301 size
= elf32_arm_eabi_attr_size (stdoutput
);
20302 s
= subseg_new (".ARM.attributes", 0);
20303 bfd_set_section_flags (stdoutput
, s
, SEC_READONLY
| SEC_DATA
);
20304 addr
= frag_now_fix ();
20305 p
= frag_more (size
);
20306 elf32_arm_set_eabi_attr_contents (stdoutput
, (bfd_byte
*)p
, size
);
20308 #endif /* OBJ_ELF */
20311 /* Parse a .cpu directive. */
20314 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
20316 const struct arm_cpu_option_table
*opt
;
20320 name
= input_line_pointer
;
20321 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
20322 input_line_pointer
++;
20323 saved_char
= *input_line_pointer
;
20324 *input_line_pointer
= 0;
20326 /* Skip the first "all" entry. */
20327 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
20328 if (streq (opt
->name
, name
))
20330 mcpu_cpu_opt
= &opt
->value
;
20331 selected_cpu
= opt
->value
;
20332 if (opt
->canonical_name
)
20333 strcpy(selected_cpu_name
, opt
->canonical_name
);
20337 for (i
= 0; opt
->name
[i
]; i
++)
20338 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
20339 selected_cpu_name
[i
] = 0;
20341 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
20342 *input_line_pointer
= saved_char
;
20343 demand_empty_rest_of_line ();
20346 as_bad (_("unknown cpu `%s'"), name
);
20347 *input_line_pointer
= saved_char
;
20348 ignore_rest_of_line ();
20352 /* Parse a .arch directive. */
20355 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
20357 const struct arm_arch_option_table
*opt
;
20361 name
= input_line_pointer
;
20362 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
20363 input_line_pointer
++;
20364 saved_char
= *input_line_pointer
;
20365 *input_line_pointer
= 0;
20367 /* Skip the first "all" entry. */
20368 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
20369 if (streq (opt
->name
, name
))
20371 mcpu_cpu_opt
= &opt
->value
;
20372 selected_cpu
= opt
->value
;
20373 strcpy(selected_cpu_name
, opt
->name
);
20374 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
20375 *input_line_pointer
= saved_char
;
20376 demand_empty_rest_of_line ();
20380 as_bad (_("unknown architecture `%s'\n"), name
);
20381 *input_line_pointer
= saved_char
;
20382 ignore_rest_of_line ();
20386 /* Parse a .object_arch directive. */
20389 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
20391 const struct arm_arch_option_table
*opt
;
20395 name
= input_line_pointer
;
20396 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
20397 input_line_pointer
++;
20398 saved_char
= *input_line_pointer
;
20399 *input_line_pointer
= 0;
20401 /* Skip the first "all" entry. */
20402 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
20403 if (streq (opt
->name
, name
))
20405 object_arch
= &opt
->value
;
20406 *input_line_pointer
= saved_char
;
20407 demand_empty_rest_of_line ();
20411 as_bad (_("unknown architecture `%s'\n"), name
);
20412 *input_line_pointer
= saved_char
;
20413 ignore_rest_of_line ();
20417 /* Parse a .fpu directive. */
20420 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
20422 const struct arm_option_cpu_value_table
*opt
;
20426 name
= input_line_pointer
;
20427 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
20428 input_line_pointer
++;
20429 saved_char
= *input_line_pointer
;
20430 *input_line_pointer
= 0;
20432 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
20433 if (streq (opt
->name
, name
))
20435 mfpu_opt
= &opt
->value
;
20436 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
20437 *input_line_pointer
= saved_char
;
20438 demand_empty_rest_of_line ();
20442 as_bad (_("unknown floating point format `%s'\n"), name
);
20443 *input_line_pointer
= saved_char
;
20444 ignore_rest_of_line ();