1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
34 /* Need TARGET_CPU. */
41 #include "opcode/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
56 /* This structure holds the unwinding state. */
61 symbolS
* table_entry
;
62 symbolS
* personality_routine
;
63 int personality_index
;
64 /* The segment containing the function. */
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes
;
71 /* The number of bytes pushed to the stack. */
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset
;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
81 /* Nonzero if an unwind_setfp directive has been seen. */
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored
:1;
87 /* Bit N indicates that an R_ARM_NONE relocation has been output for
88 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
89 emitted only once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency
= 0;
101 /* Types of processor to assemble for. */
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
128 #endif /* ifndef FPU_DEFAULT */
130 #define streq(a, b) (strcmp (a, b) == 0)
132 static arm_feature_set cpu_variant
;
133 static arm_feature_set arm_arch_used
;
134 static arm_feature_set thumb_arch_used
;
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26
= FALSE
;
138 static int atpcs
= FALSE
;
139 static int support_interwork
= FALSE
;
140 static int uses_apcs_float
= FALSE
;
141 static int pic_code
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
157 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
158 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
159 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
160 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
161 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
162 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
163 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
164 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
167 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
170 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
171 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
172 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
173 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
174 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
175 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
176 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
177 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
178 static const arm_feature_set arm_ext_v4t_5
=
179 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
180 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
181 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
182 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
183 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
184 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
185 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
186 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
188 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
189 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
190 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
191 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
192 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
193 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
195 static const arm_feature_set arm_arch_any
= ARM_ANY
;
196 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
198 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
200 static const arm_feature_set arm_cext_iwmmxt
=
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
202 static const arm_feature_set arm_cext_xscale
=
203 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
204 static const arm_feature_set arm_cext_maverick
=
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
206 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
207 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
208 static const arm_feature_set fpu_vfp_ext_v1xd
=
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
210 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
211 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
212 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
213 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
215 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
217 static int mfloat_abi_opt
= -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name
[16];
224 static int meabi_flags
= EABI_DEFAULT
;
226 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS
* GOT_symbol
;
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
239 static int thumb_mode
= 0;
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
253 Important differences from the old Thumb mode:
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
264 static bfd_boolean unified_syntax
= FALSE
;
279 enum neon_el_type type
;
283 #define NEON_MAX_TYPE_ELS 4
287 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
294 unsigned long instruction
;
298 struct neon_type vectype
;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
304 bfd_reloc_code_real_type type
;
313 struct neon_type_el vectype
;
314 unsigned present
: 1; /* Operand present. */
315 unsigned isreg
: 1; /* Operand was a register. */
316 unsigned immisreg
: 1; /* .imm field is a second register. */
317 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
324 unsigned writeback
: 1; /* Operand has trailing ! */
325 unsigned preind
: 1; /* Preindexed address. */
326 unsigned postind
: 1; /* Postindexed address. */
327 unsigned negative
: 1; /* Index register was negated. */
328 unsigned shifted
: 1; /* Shift applied to operation. */
329 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
333 static struct arm_it inst
;
335 #define NUM_FLOAT_VALS 8
337 const char * fp_const
[] =
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
345 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
361 #define DOUBLE_LOAD_FLAG 0x00000001
365 const char * template;
369 #define COND_ALWAYS 0xE
373 const char *template;
377 struct asm_barrier_opt
379 const char *template;
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
395 bfd_reloc_code_real_type reloc
;
400 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
401 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
406 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
413 struct neon_typed_alias
415 unsigned char defined
;
417 struct neon_type_el eltype
;
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
452 unsigned char number
;
454 unsigned char builtin
;
455 struct neon_typed_alias
*neon
;
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs
[] =
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
482 /* Some well known registers that we refer to directly elsewhere. */
487 /* ARM instructions take 4bytes in the object file, Thumb instructions
493 /* Basic string to match. */
494 const char *template;
496 /* Parameters to instruction. */
497 unsigned char operands
[8];
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag
: 4;
502 /* Basic instruction code. */
503 unsigned int avalue
: 28;
505 /* Thumb-format instruction code. */
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set
*avariant
;
510 const arm_feature_set
*tvariant
;
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode
) (void);
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode
) (void);
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
533 #define DATA_OP_SHIFT 21
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
538 /* Codes to distinguish the arithmetic instructions. */
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
618 #define T_OPCODE_BRANCH 0xe000
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
625 #define BAD_ARGS _("bad arguments to instruction")
626 #define BAD_PC _("r15 not allowed here")
627 #define BAD_COND _("instruction cannot be conditional")
628 #define BAD_OVERLAP _("registers may not be the same")
629 #define BAD_HIREG _("lo register required")
630 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: this macro expands inside expressions such as
   `inst.error = BAD_ADDR_MODE;' — a semicolon here would terminate an
   `if' body early and detach any following `else'.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
632 #define BAD_BRANCH _("branch must be last instruction in IT block")
633 #define BAD_NOT_IT _("instruction not allowed in IT block")
635 static struct hash_control
*arm_ops_hsh
;
636 static struct hash_control
*arm_cond_hsh
;
637 static struct hash_control
*arm_shift_hsh
;
638 static struct hash_control
*arm_psr_hsh
;
639 static struct hash_control
*arm_v7m_psr_hsh
;
640 static struct hash_control
*arm_reg_hsh
;
641 static struct hash_control
*arm_reloc_hsh
;
642 static struct hash_control
*arm_barrier_opt_hsh
;
644 /* Stuff needed to resolve the label ambiguity
654 symbolS
* last_label_seen
;
655 static int label_is_thumb_function_name
= FALSE
;
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
663 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
664 unsigned int next_free_entry
;
669 struct literal_pool
* next
;
672 /* Pointer to a linked list of literal pools. */
673 literal_pool
* list_of_pools
= NULL
;
/* State variables for IT block handling.  current_it_mask is the
   remaining-condition mask of the open IT block (a multi-bit value that
   is shifted as instructions are consumed), so it must be a plain
   integer, not bfd_boolean.  Zero means "no IT block open".  */
static int current_it_mask = 0;
/* Condition code of the current IT block.  */
static int current_cc;
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars
[] = "@";
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars
[] = "#";
695 const char line_separator_chars
[] = ";";
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS
[] = "eE";
701 /* Chars that mean this number is a floating point constant. */
705 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
707 /* Prefix characters that indicate the start of an immediate
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
711 /* Separator character handling. */
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
716 skip_past_char (char ** str
, char c
)
726 #define skip_past_comma(str) skip_past_char (str, ',')
728 /* Arithmetic expressions (possibly involving symbols). */
730 /* Return TRUE if anything in the expression is a bignum. */
733 walk_no_bignums (symbolS
* sp
)
735 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
738 if (symbol_get_value_expression (sp
)->X_add_symbol
)
740 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
741 || (symbol_get_value_expression (sp
)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
748 static int in_my_get_expression
= 0;
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
759 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
764 /* In unified syntax, all prefixes are optional. */
766 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
771 case GE_NO_PREFIX
: break;
773 if (!is_immediate_prefix (**str
))
775 inst
.error
= _("immediate expression requires a # prefix");
781 case GE_OPT_PREFIX_BIG
:
782 if (is_immediate_prefix (**str
))
788 memset (ep
, 0, sizeof (expressionS
));
790 save_in
= input_line_pointer
;
791 input_line_pointer
= *str
;
792 in_my_get_expression
= 1;
793 seg
= expression (ep
);
794 in_my_get_expression
= 0;
796 if (ep
->X_op
== O_illegal
)
798 /* We found a bad expression in md_operand(). */
799 *str
= input_line_pointer
;
800 input_line_pointer
= save_in
;
801 if (inst
.error
== NULL
)
802 inst
.error
= _("bad expression");
807 if (seg
!= absolute_section
808 && seg
!= text_section
809 && seg
!= data_section
810 && seg
!= bss_section
811 && seg
!= undefined_section
)
813 inst
.error
= _("bad segment");
814 *str
= input_line_pointer
;
815 input_line_pointer
= save_in
;
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode
!= GE_OPT_PREFIX_BIG
824 && (ep
->X_op
== O_big
826 && (walk_no_bignums (ep
->X_add_symbol
)
828 && walk_no_bignums (ep
->X_op_symbol
))))))
830 inst
.error
= _("invalid constant");
831 *str
= input_line_pointer
;
832 input_line_pointer
= save_in
;
836 *str
= input_line_pointer
;
837 input_line_pointer
= save_in
;
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
846 Note that fp constants aren't represent in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
856 md_atof (int type
, char * litP
, int * sizeP
)
859 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
891 return _("bad call to MD_ATOF()");
894 t
= atof_ieee (input_line_pointer
, type
, words
);
896 input_line_pointer
= t
;
899 if (target_big_endian
)
901 for (i
= 0; i
< prec
; i
++)
903 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
909 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
910 for (i
= prec
- 1; i
>= 0; i
--)
912 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i
= 0; i
< prec
; i
+= 2)
920 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
921 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
932 md_operand (expressionS
* expr
)
934 if (in_my_get_expression
)
935 expr
->X_op
= O_illegal
;
938 /* Immediate values. */
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
945 immediate_for_directive (int *val
)
948 exp
.X_op
= O_illegal
;
950 if (is_immediate_prefix (*input_line_pointer
))
952 input_line_pointer
++;
956 if (exp
.X_op
!= O_constant
)
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
962 *val
= exp
.X_add_number
;
967 /* Register parsing. */
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
974 static struct reg_entry
*
975 arm_reg_parse_multi (char **ccp
)
979 struct reg_entry
*reg
;
981 #ifdef REGISTER_PREFIX
982 if (*start
!= REGISTER_PREFIX
)
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start
== OPTIONAL_REGISTER_PREFIX
)
992 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
997 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
999 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1009 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1010 enum arm_reg_type type
)
1012 /* Alternative syntaxes are accepted for a few register classes. */
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg
&& reg
->type
== REG_TYPE_CN
)
1025 /* For backward compatibility, a bare number is valid here. */
1027 unsigned long processor
= strtoul (start
, ccp
, 10);
1028 if (*ccp
!= start
&& processor
<= 15)
1032 case REG_TYPE_MMXWC
:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1050 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1053 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1060 if (reg
&& reg
->type
== type
)
1063 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1078 Can all be legally parsed by this function.
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1085 parse_neon_type (struct neon_type
*type
, char **str
)
1092 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1094 enum neon_el_type thistype
= NT_untyped
;
1095 unsigned thissize
= -1u;
1102 /* Just a size without an explicit type. */
1106 switch (TOLOWER (*ptr
))
1108 case 'i': thistype
= NT_integer
; break;
1109 case 'f': thistype
= NT_float
; break;
1110 case 'p': thistype
= NT_poly
; break;
1111 case 's': thistype
= NT_signed
; break;
1112 case 'u': thistype
= NT_unsigned
; break;
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1126 thissize
= strtoul (ptr
, &ptr
, 10);
1128 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1131 as_bad (_("bad size %d in type specifier"), thissize
);
1138 type
->el
[type
->elems
].type
= thistype
;
1139 type
->el
[type
->elems
].size
= thissize
;
1144 /* Empty/missing type is not a successful parse. */
1145 if (type
->elems
== 0)
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1159 first_error (const char *err
)
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1167 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1170 struct neon_type optype
;
1174 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1176 if (optype
.elems
== 1)
1177 *vectype
= optype
.el
[0];
1180 first_error (_("only one type should be specified for operand"));
1186 first_error (_("vector type expected"));
1198 /* Special meanings for indices (which have a range of 0-7), which will fit into
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1210 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1211 enum arm_reg_type
*rtype
,
1212 struct neon_typed_alias
*typeinfo
)
1215 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1216 struct neon_typed_alias atype
;
1217 struct neon_type_el parsetype
;
1221 atype
.eltype
.type
= NT_invtype
;
1222 atype
.eltype
.size
= -1;
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1228 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type
== REG_TYPE_NDQ
1238 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1241 if (type
!= reg
->type
)
1247 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1249 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1251 first_error (_("can't redefine type for operand"));
1254 atype
.defined
|= NTA_HASTYPE
;
1255 atype
.eltype
= parsetype
;
1258 if (skip_past_char (&str
, '[') == SUCCESS
)
1260 if (type
!= REG_TYPE_VFD
)
1262 first_error (_("only D registers may be indexed"));
1266 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1268 first_error (_("can't change index for operand"));
1272 atype
.defined
|= NTA_HASINDEX
;
1274 if (skip_past_char (&str
, ']') == SUCCESS
)
1275 atype
.index
= NEON_ALL_LANES
;
1280 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1282 if (exp
.X_op
!= O_constant
)
1284 first_error (_("constant expression required"));
1288 if (skip_past_char (&str
, ']') == FAIL
)
1291 atype
.index
= exp
.X_add_number
;
1306 /* Like arm_reg_parse, but allow allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1315 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1316 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1318 struct neon_typed_alias atype
;
1320 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1328 first_error (_("register operand expected, but got scalar"));
1333 *vectype
= atype
.eltype
;
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1348 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1352 struct neon_typed_alias atype
;
1354 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1356 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1359 if (atype
.index
== NEON_ALL_LANES
)
1361 first_error (_("scalar must have an index"));
1364 else if (atype
.index
>= 64 / elsize
)
1366 first_error (_("scalar index out of range"));
1371 *type
= atype
.eltype
;
1375 return reg
* 16 + atype
.index
;
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1380 parse_reg_list (char ** strp
)
1382 char * str
= * strp
;
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1401 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1403 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1413 first_error (_("bad range in register list"));
1417 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1419 if (range
& (1 << i
))
1421 (_("Warning: duplicated register (r%d) in register list"),
1429 if (range
& (1 << reg
))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1432 else if (reg
<= cur_reg
)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1438 while (skip_past_comma (&str
) != FAIL
1439 || (in_range
= 1, *str
++ == '-'));
1444 first_error (_("missing `}'"));
1452 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1455 if (expr
.X_op
== O_constant
)
1457 if (expr
.X_add_number
1458 != (expr
.X_add_number
& 0x0000ffff))
1460 inst
.error
= _("invalid register mask");
1464 if ((range
& expr
.X_add_number
) != 0)
1466 int regno
= range
& expr
.X_add_number
;
1469 regno
= (1 << regno
) - 1;
1471 (_("Warning: duplicated register (r%d) in register list"),
1475 range
|= expr
.X_add_number
;
1479 if (inst
.reloc
.type
!= 0)
1481 inst
.error
= _("expression too complex");
1485 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1486 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1487 inst
.reloc
.pc_rel
= 0;
1491 if (*str
== '|' || *str
== '+')
1497 while (another_range
);
1503 /* Types of registers in a list. */
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1528 parse_vfp_reg_list (char **str
, unsigned int *pbase
, enum reg_list_els etype
)
1532 enum arm_reg_type regtype
= 0;
1536 unsigned long mask
= 0;
1541 inst
.error
= _("expecting {");
1550 regtype
= REG_TYPE_VFS
;
1555 regtype
= REG_TYPE_VFD
;
1556 /* VFPv3 allows 32 D registers. */
1557 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1561 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1564 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1571 case REGLIST_NEON_D
:
1572 regtype
= REG_TYPE_NDQ
;
1577 base_reg
= max_regs
;
1581 int setmask
= 1, addregs
= 1;
1583 new_base
= arm_typed_reg_parse (str
, regtype
, ®type
, NULL
);
1585 if (new_base
== FAIL
)
1587 first_error (_(reg_expected_msgs
[regtype
]));
1591 /* Note: a value of 2 * n is returned for the register Q<n>. */
1592 if (regtype
== REG_TYPE_NQ
)
1598 if (new_base
< base_reg
)
1599 base_reg
= new_base
;
1601 if (mask
& (setmask
<< new_base
))
1603 first_error (_("invalid register list"));
1607 if ((mask
>> new_base
) != 0 && ! warned
)
1609 as_tsktsk (_("register list not in ascending order"));
1613 mask
|= setmask
<< new_base
;
1616 if (**str
== '-') /* We have the start of a range expression */
1622 if ((high_range
= arm_typed_reg_parse (str
, regtype
, NULL
, NULL
))
1625 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1629 if (regtype
== REG_TYPE_NQ
)
1630 high_range
= high_range
+ 1;
1632 if (high_range
<= new_base
)
1634 inst
.error
= _("register range not in ascending order");
1638 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1640 if (mask
& (setmask
<< new_base
))
1642 inst
.error
= _("invalid register list");
1646 mask
|= setmask
<< new_base
;
1651 while (skip_past_comma (str
) != FAIL
);
1655 /* Sanity check -- should have raised a parse error above. */
1656 if (count
== 0 || count
> max_regs
)
1661 /* Final test -- the registers must be consecutive. */
1663 for (i
= 0; i
< count
; i
++)
1665 if ((mask
& (1u << i
)) == 0)
1667 inst
.error
= _("non-contiguous register range");
1675 /* True if two alias types are the same. */
1678 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1686 if (a
->defined
!= b
->defined
)
1689 if ((a
->defined
& NTA_HASTYPE
) != 0
1690 && (a
->eltype
.type
!= b
->eltype
.type
1691 || a
->eltype
.size
!= b
->eltype
.size
))
1694 if ((a
->defined
& NTA_HASINDEX
) != 0
1695 && (a
->index
!= b
->index
))
1701 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1702 The base register is put in *PBASE.
1703 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1705 The register stride (minus one) is put in bit 4 of the return value.
1706 Bits [6:5] encode the list length (minus one).
1707 The type of the list elements is put in *ELTYPE, if non-NULL. */
1709 #define NEON_LANE(X) ((X) & 0xf)
1710 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1711 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1714 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1715 struct neon_type_el
*eltype
)
1722 int leading_brace
= 0;
1723 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1725 const char *const incr_error
= "register stride must be 1 or 2";
1726 const char *const type_error
= "mismatched element/structure types in list";
1727 struct neon_typed_alias firsttype
;
1729 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1734 struct neon_typed_alias atype
;
1735 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1739 first_error (_(reg_expected_msgs
[rtype
]));
1746 if (rtype
== REG_TYPE_NQ
)
1753 else if (reg_incr
== -1)
1755 reg_incr
= getreg
- base_reg
;
1756 if (reg_incr
< 1 || reg_incr
> 2)
1758 first_error (_(incr_error
));
1762 else if (getreg
!= base_reg
+ reg_incr
* count
)
1764 first_error (_(incr_error
));
1768 if (!neon_alias_types_same (&atype
, &firsttype
))
1770 first_error (_(type_error
));
1774 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1778 struct neon_typed_alias htype
;
1779 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1781 lane
= NEON_INTERLEAVE_LANES
;
1782 else if (lane
!= NEON_INTERLEAVE_LANES
)
1784 first_error (_(type_error
));
1789 else if (reg_incr
!= 1)
1791 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1795 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1798 first_error (_(reg_expected_msgs
[rtype
]));
1801 if (!neon_alias_types_same (&htype
, &firsttype
))
1803 first_error (_(type_error
));
1806 count
+= hireg
+ dregs
- getreg
;
1810 /* If we're using Q registers, we can't use [] or [n] syntax. */
1811 if (rtype
== REG_TYPE_NQ
)
1817 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1821 else if (lane
!= atype
.index
)
1823 first_error (_(type_error
));
1827 else if (lane
== -1)
1828 lane
= NEON_INTERLEAVE_LANES
;
1829 else if (lane
!= NEON_INTERLEAVE_LANES
)
1831 first_error (_(type_error
));
1836 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1838 /* No lane set by [x]. We must be interleaving structures. */
1840 lane
= NEON_INTERLEAVE_LANES
;
1843 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1844 || (count
> 1 && reg_incr
== -1))
1846 first_error (_("error parsing element/structure list"));
1850 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1852 first_error (_("expected }"));
1860 *eltype
= firsttype
.eltype
;
1865 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1868 /* Parse an explicit relocation suffix on an expression. This is
1869 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1870 arm_reloc_hsh contains no entries, so this function can only
1871 succeed if there is no () after the word. Returns -1 on error,
1872 BFD_RELOC_UNUSED if there wasn't any suffix. */
1874 parse_reloc (char **str
)
1876 struct reloc_entry
*r
;
1880 return BFD_RELOC_UNUSED
;
1885 while (*q
&& *q
!= ')' && *q
!= ',')
1890 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1897 /* Directives: register aliases. */
1899 static struct reg_entry
*
1900 insert_reg_alias (char *str
, int number
, int type
)
1902 struct reg_entry
*new;
1905 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1908 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1910 /* Only warn about a redefinition if it's not defined as the
1912 else if (new->number
!= number
|| new->type
!= type
)
1913 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1918 name
= xstrdup (str
);
1919 new = xmalloc (sizeof (struct reg_entry
));
1922 new->number
= number
;
1924 new->builtin
= FALSE
;
1927 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1934 insert_neon_reg_alias (char *str
, int number
, int type
,
1935 struct neon_typed_alias
*atype
)
1937 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1941 first_error (_("attempt to redefine typed alias"));
1947 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
1948 *reg
->neon
= *atype
;
1952 /* Look for the .req directive. This is of the form:
1954 new_register_name .req existing_register_name
1956 If we find one, or if it looks sufficiently like one that we want to
1957 handle any error here, return non-zero. Otherwise return zero. */
1960 create_register_alias (char * newname
, char *p
)
1962 struct reg_entry
*old
;
1963 char *oldname
, *nbuf
;
1966 /* The input scrubber ensures that whitespace after the mnemonic is
1967 collapsed to single spaces. */
1969 if (strncmp (oldname
, " .req ", 6) != 0)
1973 if (*oldname
== '\0')
1976 old
= hash_find (arm_reg_hsh
, oldname
);
1979 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
1983 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1984 the desired alias name, and p points to its end. If not, then
1985 the desired alias name is in the global original_case_string. */
1986 #ifdef TC_CASE_SENSITIVE
1989 newname
= original_case_string
;
1990 nlen
= strlen (newname
);
1993 nbuf
= alloca (nlen
+ 1);
1994 memcpy (nbuf
, newname
, nlen
);
1997 /* Create aliases under the new name as stated; an all-lowercase
1998 version of the new name; and an all-uppercase version of the new
2000 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2002 for (p
= nbuf
; *p
; p
++)
2005 if (strncmp (nbuf
, newname
, nlen
))
2006 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2008 for (p
= nbuf
; *p
; p
++)
2011 if (strncmp (nbuf
, newname
, nlen
))
2012 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2017 /* Create a Neon typed/indexed register alias using directives, e.g.:
2022 These typed registers can be used instead of the types specified after the
2023 Neon mnemonic, so long as all operands given have types. Types can also be
2024 specified directly, e.g.:
2025 vadd d0.s32, d1.s32, d2.s32
2029 create_neon_reg_alias (char *newname
, char *p
)
2031 enum arm_reg_type basetype
;
2032 struct reg_entry
*basereg
;
2033 struct reg_entry mybasereg
;
2034 struct neon_type ntype
;
2035 struct neon_typed_alias typeinfo
;
2036 char *namebuf
, *nameend
;
2039 typeinfo
.defined
= 0;
2040 typeinfo
.eltype
.type
= NT_invtype
;
2041 typeinfo
.eltype
.size
= -1;
2042 typeinfo
.index
= -1;
2046 if (strncmp (p
, " .dn ", 5) == 0)
2047 basetype
= REG_TYPE_VFD
;
2048 else if (strncmp (p
, " .qn ", 5) == 0)
2049 basetype
= REG_TYPE_NQ
;
2058 basereg
= arm_reg_parse_multi (&p
);
2060 if (basereg
&& basereg
->type
!= basetype
)
2062 as_bad (_("bad type for register"));
2066 if (basereg
== NULL
)
2069 /* Try parsing as an integer. */
2070 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2071 if (exp
.X_op
!= O_constant
)
2073 as_bad (_("expression must be constant"));
2076 basereg
= &mybasereg
;
2077 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2083 typeinfo
= *basereg
->neon
;
2085 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2087 /* We got a type. */
2088 if (typeinfo
.defined
& NTA_HASTYPE
)
2090 as_bad (_("can't redefine the type of a register alias"));
2094 typeinfo
.defined
|= NTA_HASTYPE
;
2095 if (ntype
.elems
!= 1)
2097 as_bad (_("you must specify a single type only"));
2100 typeinfo
.eltype
= ntype
.el
[0];
2103 if (skip_past_char (&p
, '[') == SUCCESS
)
2106 /* We got a scalar index. */
2108 if (typeinfo
.defined
& NTA_HASINDEX
)
2110 as_bad (_("can't redefine the index of a scalar alias"));
2114 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2116 if (exp
.X_op
!= O_constant
)
2118 as_bad (_("scalar index must be constant"));
2122 typeinfo
.defined
|= NTA_HASINDEX
;
2123 typeinfo
.index
= exp
.X_add_number
;
2125 if (skip_past_char (&p
, ']') == FAIL
)
2127 as_bad (_("expecting ]"));
2132 namelen
= nameend
- newname
;
2133 namebuf
= alloca (namelen
+ 1);
2134 strncpy (namebuf
, newname
, namelen
);
2135 namebuf
[namelen
] = '\0';
2137 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2138 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2140 /* Insert name in all uppercase. */
2141 for (p
= namebuf
; *p
; p
++)
2144 if (strncmp (namebuf
, newname
, namelen
))
2145 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2146 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2148 /* Insert name in all lowercase. */
2149 for (p
= namebuf
; *p
; p
++)
2152 if (strncmp (namebuf
, newname
, namelen
))
2153 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2154 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2159 /* Should never be called, as .req goes between the alias and the
2160 register name, not at the beginning of the line. */
2162 s_req (int a ATTRIBUTE_UNUSED
)
2164 as_bad (_("invalid syntax for .req directive"));
2168 s_dn (int a ATTRIBUTE_UNUSED
)
2170 as_bad (_("invalid syntax for .dn directive"));
2174 s_qn (int a ATTRIBUTE_UNUSED
)
2176 as_bad (_("invalid syntax for .qn directive"));
2179 /* The .unreq directive deletes an alias which was previously defined
2180 by .req. For example:
2186 s_unreq (int a ATTRIBUTE_UNUSED
)
2191 name
= input_line_pointer
;
2193 while (*input_line_pointer
!= 0
2194 && *input_line_pointer
!= ' '
2195 && *input_line_pointer
!= '\n')
2196 ++input_line_pointer
;
2198 saved_char
= *input_line_pointer
;
2199 *input_line_pointer
= 0;
2202 as_bad (_("invalid syntax for .unreq directive"));
2205 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2208 as_bad (_("unknown register alias '%s'"), name
);
2209 else if (reg
->builtin
)
2210 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2214 hash_delete (arm_reg_hsh
, name
);
2215 free ((char *) reg
->name
);
2222 *input_line_pointer
= saved_char
;
2223 demand_empty_rest_of_line ();
2226 /* Directives: Instruction set selection. */
2229 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2230 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2231 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2232 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2234 static enum mstate mapstate
= MAP_UNDEFINED
;
2237 mapping_state (enum mstate state
)
2240 const char * symname
;
2243 if (mapstate
== state
)
2244 /* The mapping symbol has already been emitted.
2245 There is nothing else to do. */
2254 type
= BSF_NO_FLAGS
;
2258 type
= BSF_NO_FLAGS
;
2262 type
= BSF_NO_FLAGS
;
2270 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2272 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2273 symbol_table_insert (symbolP
);
2274 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2279 THUMB_SET_FUNC (symbolP
, 0);
2280 ARM_SET_THUMB (symbolP
, 0);
2281 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2285 THUMB_SET_FUNC (symbolP
, 1);
2286 ARM_SET_THUMB (symbolP
, 1);
2287 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2296 #define mapping_state(x) /* nothing */
2299 /* Find the real, Thumb encoded start of a Thumb function. */
2302 find_real_start (symbolS
* symbolP
)
2305 const char * name
= S_GET_NAME (symbolP
);
2306 symbolS
* new_target
;
2308 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2309 #define STUB_NAME ".real_start_of"
2314 /* The compiler may generate BL instructions to local labels because
2315 it needs to perform a branch to a far away location. These labels
2316 do not have a corresponding ".real_start_of" label. We check
2317 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2318 the ".real_start_of" convention for nonlocal branches. */
2319 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2322 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2323 new_target
= symbol_find (real_start
);
2325 if (new_target
== NULL
)
2327 as_warn ("Failed to find real start of function: %s\n", name
);
2328 new_target
= symbolP
;
2335 opcode_select (int width
)
2342 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2343 as_bad (_("selected processor does not support THUMB opcodes"));
2346 /* No need to force the alignment, since we will have been
2347 coming from ARM mode, which is word-aligned. */
2348 record_alignment (now_seg
, 1);
2350 mapping_state (MAP_THUMB
);
2356 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2357 as_bad (_("selected processor does not support ARM opcodes"));
2362 frag_align (2, 0, 0);
2364 record_alignment (now_seg
, 1);
2366 mapping_state (MAP_ARM
);
2370 as_bad (_("invalid instruction size selected (%d)"), width
);
2375 s_arm (int ignore ATTRIBUTE_UNUSED
)
2378 demand_empty_rest_of_line ();
2382 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2385 demand_empty_rest_of_line ();
2389 s_code (int unused ATTRIBUTE_UNUSED
)
2393 temp
= get_absolute_expression ();
2398 opcode_select (temp
);
2402 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2407 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2409 /* If we are not already in thumb mode go into it, EVEN if
2410 the target processor does not support thumb instructions.
2411 This is used by gcc/config/arm/lib1funcs.asm for example
2412 to compile interworking support functions even if the
2413 target processor should not support interworking. */
2417 record_alignment (now_seg
, 1);
2420 demand_empty_rest_of_line ();
2424 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2428 /* The following label is the name/address of the start of a Thumb function.
2429 We need to know this for the interworking support. */
2430 label_is_thumb_function_name
= TRUE
;
2433 /* Perform a .set directive, but also mark the alias as
2434 being a thumb function. */
2437 s_thumb_set (int equiv
)
2439 /* XXX the following is a duplicate of the code for s_set() in read.c
2440 We cannot just call that code as we need to get at the symbol that
2447 /* Especial apologies for the random logic:
2448 This just grew, and could be parsed much more simply!
2450 name
= input_line_pointer
;
2451 delim
= get_symbol_end ();
2452 end_name
= input_line_pointer
;
2455 if (*input_line_pointer
!= ',')
2458 as_bad (_("expected comma after name \"%s\""), name
);
2460 ignore_rest_of_line ();
2464 input_line_pointer
++;
2467 if (name
[0] == '.' && name
[1] == '\0')
2469 /* XXX - this should not happen to .thumb_set. */
2473 if ((symbolP
= symbol_find (name
)) == NULL
2474 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2477 /* When doing symbol listings, play games with dummy fragments living
2478 outside the normal fragment chain to record the file and line info
2480 if (listing
& LISTING_SYMBOLS
)
2482 extern struct list_info_struct
* listing_tail
;
2483 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2485 memset (dummy_frag
, 0, sizeof (fragS
));
2486 dummy_frag
->fr_type
= rs_fill
;
2487 dummy_frag
->line
= listing_tail
;
2488 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2489 dummy_frag
->fr_symbol
= symbolP
;
2493 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2496 /* "set" symbols are local unless otherwise specified. */
2497 SF_SET_LOCAL (symbolP
);
2498 #endif /* OBJ_COFF */
2499 } /* Make a new symbol. */
2501 symbol_table_insert (symbolP
);
2506 && S_IS_DEFINED (symbolP
)
2507 && S_GET_SEGMENT (symbolP
) != reg_section
)
2508 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2510 pseudo_set (symbolP
);
2512 demand_empty_rest_of_line ();
2514 /* XXX Now we come to the Thumb specific bit of code. */
2516 THUMB_SET_FUNC (symbolP
, 1);
2517 ARM_SET_THUMB (symbolP
, 1);
2518 #if defined OBJ_ELF || defined OBJ_COFF
2519 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2523 /* Directives: Mode selection. */
2525 /* .syntax [unified|divided] - choose the new unified syntax
2526 (same for Arm and Thumb encoding, modulo slight differences in what
2527 can be represented) or the old divergent syntax for each mode. */
2529 s_syntax (int unused ATTRIBUTE_UNUSED
)
2533 name
= input_line_pointer
;
2534 delim
= get_symbol_end ();
2536 if (!strcasecmp (name
, "unified"))
2537 unified_syntax
= TRUE
;
2538 else if (!strcasecmp (name
, "divided"))
2539 unified_syntax
= FALSE
;
2542 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2545 *input_line_pointer
= delim
;
2546 demand_empty_rest_of_line ();
2549 /* Directives: sectioning and alignment. */
2551 /* Same as s_align_ptwo but align 0 => align 2. */
2554 s_align (int unused ATTRIBUTE_UNUSED
)
2558 long max_alignment
= 15;
2560 temp
= get_absolute_expression ();
2561 if (temp
> max_alignment
)
2562 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2565 as_bad (_("alignment negative. 0 assumed."));
2569 if (*input_line_pointer
== ',')
2571 input_line_pointer
++;
2572 temp_fill
= get_absolute_expression ();
2580 /* Only make a frag if we HAVE to. */
2581 if (temp
&& !need_pass_2
)
2582 frag_align (temp
, (int) temp_fill
, 0);
2583 demand_empty_rest_of_line ();
2585 record_alignment (now_seg
, temp
);
2589 s_bss (int ignore ATTRIBUTE_UNUSED
)
2591 /* We don't support putting frags in the BSS segment, we fake it by
2592 marking in_bss, then looking at s_skip for clues. */
2593 subseg_set (bss_section
, 0);
2594 demand_empty_rest_of_line ();
2595 mapping_state (MAP_DATA
);
2599 s_even (int ignore ATTRIBUTE_UNUSED
)
2601 /* Never make frag if expect extra pass. */
2603 frag_align (1, 0, 0);
2605 record_alignment (now_seg
, 1);
2607 demand_empty_rest_of_line ();
2610 /* Directives: Literal pools. */
2612 static literal_pool
*
2613 find_literal_pool (void)
2615 literal_pool
* pool
;
2617 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2619 if (pool
->section
== now_seg
2620 && pool
->sub_section
== now_subseg
)
2627 static literal_pool
*
2628 find_or_make_literal_pool (void)
2630 /* Next literal pool ID number. */
2631 static unsigned int latest_pool_num
= 1;
2632 literal_pool
* pool
;
2634 pool
= find_literal_pool ();
2638 /* Create a new pool. */
2639 pool
= xmalloc (sizeof (* pool
));
2643 pool
->next_free_entry
= 0;
2644 pool
->section
= now_seg
;
2645 pool
->sub_section
= now_subseg
;
2646 pool
->next
= list_of_pools
;
2647 pool
->symbol
= NULL
;
2649 /* Add it to the list. */
2650 list_of_pools
= pool
;
2653 /* New pools, and emptied pools, will have a NULL symbol. */
2654 if (pool
->symbol
== NULL
)
2656 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2657 (valueT
) 0, &zero_address_frag
);
2658 pool
->id
= latest_pool_num
++;
2665 /* Add the literal in the global 'inst'
2666 structure to the relevent literal pool. */
2669 add_to_lit_pool (void)
2671 literal_pool
* pool
;
2674 pool
= find_or_make_literal_pool ();
2676 /* Check if this literal value is already in the pool. */
2677 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2679 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2680 && (inst
.reloc
.exp
.X_op
== O_constant
)
2681 && (pool
->literals
[entry
].X_add_number
2682 == inst
.reloc
.exp
.X_add_number
)
2683 && (pool
->literals
[entry
].X_unsigned
2684 == inst
.reloc
.exp
.X_unsigned
))
2687 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2688 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2689 && (pool
->literals
[entry
].X_add_number
2690 == inst
.reloc
.exp
.X_add_number
)
2691 && (pool
->literals
[entry
].X_add_symbol
2692 == inst
.reloc
.exp
.X_add_symbol
)
2693 && (pool
->literals
[entry
].X_op_symbol
2694 == inst
.reloc
.exp
.X_op_symbol
))
2698 /* Do we need to create a new entry? */
2699 if (entry
== pool
->next_free_entry
)
2701 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2703 inst
.error
= _("literal pool overflow");
2707 pool
->literals
[entry
] = inst
.reloc
.exp
;
2708 pool
->next_free_entry
+= 1;
2711 inst
.reloc
.exp
.X_op
= O_symbol
;
2712 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
2713 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
2718 /* Can't use symbol_new here, so have to create a symbol and then at
2719 a later date assign it a value. Thats what these functions do. */
2722 symbol_locate (symbolS
* symbolP
,
2723 const char * name
, /* It is copied, the caller can modify. */
2724 segT segment
, /* Segment identifier (SEG_<something>). */
2725 valueT valu
, /* Symbol value. */
2726 fragS
* frag
) /* Associated fragment. */
2728 unsigned int name_length
;
2729 char * preserved_copy_of_name
;
2731 name_length
= strlen (name
) + 1; /* +1 for \0. */
2732 obstack_grow (¬es
, name
, name_length
);
2733 preserved_copy_of_name
= obstack_finish (¬es
);
2735 #ifdef tc_canonicalize_symbol_name
2736 preserved_copy_of_name
=
2737 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2740 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2742 S_SET_SEGMENT (symbolP
, segment
);
2743 S_SET_VALUE (symbolP
, valu
);
2744 symbol_clear_list_pointers (symbolP
);
2746 symbol_set_frag (symbolP
, frag
);
2748 /* Link to end of symbol chain. */
2750 extern int symbol_table_frozen
;
2752 if (symbol_table_frozen
)
2756 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2758 obj_symbol_new_hook (symbolP
);
2760 #ifdef tc_symbol_new_hook
2761 tc_symbol_new_hook (symbolP
);
2765 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2766 #endif /* DEBUG_SYMS */
2771 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2774 literal_pool
* pool
;
2777 pool
= find_literal_pool ();
2779 || pool
->symbol
== NULL
2780 || pool
->next_free_entry
== 0)
2783 mapping_state (MAP_DATA
);
2785 /* Align pool as you have word accesses.
2786 Only make a frag if we have to. */
2788 frag_align (2, 0, 0);
2790 record_alignment (now_seg
, 2);
2792 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2794 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2795 (valueT
) frag_now_fix (), frag_now
);
2796 symbol_table_insert (pool
->symbol
);
2798 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2800 #if defined OBJ_COFF || defined OBJ_ELF
2801 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2804 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2805 /* First output the expression in the instruction to the pool. */
2806 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2808 /* Mark the pool as empty. */
2809 pool
->next_free_entry
= 0;
2810 pool
->symbol
= NULL
;
2814 /* Forward declarations for functions below, in the MD interface
2816 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2817 static valueT
create_unwind_entry (int);
2818 static void start_unwind_section (const segT
, int);
2819 static void add_unwind_opcode (valueT
, int);
2820 static void flush_pending_unwind (void);
2822 /* Directives: Data. */
2825 s_arm_elf_cons (int nbytes
)
2829 #ifdef md_flush_pending_output
2830 md_flush_pending_output ();
2833 if (is_it_end_of_statement ())
2835 demand_empty_rest_of_line ();
2839 #ifdef md_cons_align
2840 md_cons_align (nbytes
);
2843 mapping_state (MAP_DATA
);
2847 char *base
= input_line_pointer
;
2851 if (exp
.X_op
!= O_symbol
)
2852 emit_expr (&exp
, (unsigned int) nbytes
);
2855 char *before_reloc
= input_line_pointer
;
2856 reloc
= parse_reloc (&input_line_pointer
);
2859 as_bad (_("unrecognized relocation suffix"));
2860 ignore_rest_of_line ();
2863 else if (reloc
== BFD_RELOC_UNUSED
)
2864 emit_expr (&exp
, (unsigned int) nbytes
);
2867 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2868 int size
= bfd_get_reloc_size (howto
);
2870 if (reloc
== BFD_RELOC_ARM_PLT32
)
2872 as_bad (_("(plt) is only valid on branch targets"));
2873 reloc
= BFD_RELOC_UNUSED
;
2878 as_bad (_("%s relocations do not fit in %d bytes"),
2879 howto
->name
, nbytes
);
2882 /* We've parsed an expression stopping at O_symbol.
2883 But there may be more expression left now that we
2884 have parsed the relocation marker. Parse it again.
2885 XXX Surely there is a cleaner way to do this. */
2886 char *p
= input_line_pointer
;
2888 char *save_buf
= alloca (input_line_pointer
- base
);
2889 memcpy (save_buf
, base
, input_line_pointer
- base
);
2890 memmove (base
+ (input_line_pointer
- before_reloc
),
2891 base
, before_reloc
- base
);
2893 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2895 memcpy (base
, save_buf
, p
- base
);
2897 offset
= nbytes
- size
;
2898 p
= frag_more ((int) nbytes
);
2899 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2900 size
, &exp
, 0, reloc
);
2905 while (*input_line_pointer
++ == ',');
2907 /* Put terminator back into stream. */
2908 input_line_pointer
--;
2909 demand_empty_rest_of_line ();
2913 /* Parse a .rel31 directive. */
2916 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2923 if (*input_line_pointer
== '1')
2924 highbit
= 0x80000000;
2925 else if (*input_line_pointer
!= '0')
2926 as_bad (_("expected 0 or 1"));
2928 input_line_pointer
++;
2929 if (*input_line_pointer
!= ',')
2930 as_bad (_("missing comma"));
2931 input_line_pointer
++;
2933 #ifdef md_flush_pending_output
2934 md_flush_pending_output ();
2937 #ifdef md_cons_align
2941 mapping_state (MAP_DATA
);
2946 md_number_to_chars (p
, highbit
, 4);
2947 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
2948 BFD_RELOC_ARM_PREL31
);
2950 demand_empty_rest_of_line ();
2953 /* Directives: AEABI stack-unwind tables. */
2955 /* Parse an unwind_fnstart directive. Simply records the current location. */
2958 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
2960 demand_empty_rest_of_line ();
2961 /* Mark the start of the function. */
2962 unwind
.proc_start
= expr_build_dot ();
2964 /* Reset the rest of the unwind info. */
2965 unwind
.opcode_count
= 0;
2966 unwind
.table_entry
= NULL
;
2967 unwind
.personality_routine
= NULL
;
2968 unwind
.personality_index
= -1;
2969 unwind
.frame_size
= 0;
2970 unwind
.fp_offset
= 0;
2973 unwind
.sp_restored
= 0;
2977 /* Parse a handlerdata directive. Creates the exception handling table entry
2978 for the function. */
2981 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
2983 demand_empty_rest_of_line ();
2984 if (unwind
.table_entry
)
2985 as_bad (_("dupicate .handlerdata directive"));
2987 create_unwind_entry (1);
2990 /* Parse an unwind_fnend directive. Generates the index table entry. */
2993 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
2999 demand_empty_rest_of_line ();
3001 /* Add eh table entry. */
3002 if (unwind
.table_entry
== NULL
)
3003 val
= create_unwind_entry (0);
3007 /* Add index table entry. This is two words. */
3008 start_unwind_section (unwind
.saved_seg
, 1);
3009 frag_align (2, 0, 0);
3010 record_alignment (now_seg
, 2);
3012 ptr
= frag_more (8);
3013 where
= frag_now_fix () - 8;
3015 /* Self relative offset of the function start. */
3016 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3017 BFD_RELOC_ARM_PREL31
);
3019 /* Indicate dependency on EHABI-defined personality routines to the
3020 linker, if it hasn't been done already. */
3021 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3022 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3024 static const char *const name
[] = {
3025 "__aeabi_unwind_cpp_pr0",
3026 "__aeabi_unwind_cpp_pr1",
3027 "__aeabi_unwind_cpp_pr2"
3029 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3030 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3031 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3032 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3033 = marked_pr_dependency
;
3037 /* Inline exception table entry. */
3038 md_number_to_chars (ptr
+ 4, val
, 4);
3040 /* Self relative offset of the table entry. */
3041 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3042 BFD_RELOC_ARM_PREL31
);
3044 /* Restore the original section. */
3045 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3049 /* Parse an unwind_cantunwind directive. */
3052 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3054 demand_empty_rest_of_line ();
3055 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3056 as_bad (_("personality routine specified for cantunwind frame"));
3058 unwind
.personality_index
= -2;
3062 /* Parse a personalityindex directive. */
3065 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3069 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3070 as_bad (_("duplicate .personalityindex directive"));
3074 if (exp
.X_op
!= O_constant
3075 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3077 as_bad (_("bad personality routine number"));
3078 ignore_rest_of_line ();
3082 unwind
.personality_index
= exp
.X_add_number
;
3084 demand_empty_rest_of_line ();
3088 /* Parse a personality directive. */
3091 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3095 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3096 as_bad (_("duplicate .personality directive"));
3098 name
= input_line_pointer
;
3099 c
= get_symbol_end ();
3100 p
= input_line_pointer
;
3101 unwind
.personality_routine
= symbol_find_or_make (name
);
3103 demand_empty_rest_of_line ();
3107 /* Parse a directive saving core registers. */
3110 s_arm_unwind_save_core (void)
3116 range
= parse_reg_list (&input_line_pointer
);
3119 as_bad (_("expected register list"));
3120 ignore_rest_of_line ();
3124 demand_empty_rest_of_line ();
3126 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3127 into .unwind_save {..., sp...}. We aren't bothered about the value of
3128 ip because it is clobbered by calls. */
3129 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3130 && (range
& 0x3000) == 0x1000)
3132 unwind
.opcode_count
--;
3133 unwind
.sp_restored
= 0;
3134 range
= (range
| 0x2000) & ~0x1000;
3135 unwind
.pending_offset
= 0;
3141 /* See if we can use the short opcodes. These pop a block of up to 8
3142 registers starting with r4, plus maybe r14. */
3143 for (n
= 0; n
< 8; n
++)
3145 /* Break at the first non-saved register. */
3146 if ((range
& (1 << (n
+ 4))) == 0)
3149 /* See if there are any other bits set. */
3150 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3152 /* Use the long form. */
3153 op
= 0x8000 | ((range
>> 4) & 0xfff);
3154 add_unwind_opcode (op
, 2);
3158 /* Use the short form. */
3160 op
= 0xa8; /* Pop r14. */
3162 op
= 0xa0; /* Do not pop r14. */
3164 add_unwind_opcode (op
, 1);
3171 op
= 0xb100 | (range
& 0xf);
3172 add_unwind_opcode (op
, 2);
3175 /* Record the number of bytes pushed. */
3176 for (n
= 0; n
< 16; n
++)
3178 if (range
& (1 << n
))
3179 unwind
.frame_size
+= 4;
3184 /* Parse a directive saving FPA registers. */
3187 s_arm_unwind_save_fpa (int reg
)
3193 /* Get Number of registers to transfer. */
3194 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3197 exp
.X_op
= O_illegal
;
3199 if (exp
.X_op
!= O_constant
)
3201 as_bad (_("expected , <constant>"));
3202 ignore_rest_of_line ();
3206 num_regs
= exp
.X_add_number
;
3208 if (num_regs
< 1 || num_regs
> 4)
3210 as_bad (_("number of registers must be in the range [1:4]"));
3211 ignore_rest_of_line ();
3215 demand_empty_rest_of_line ();
3220 op
= 0xb4 | (num_regs
- 1);
3221 add_unwind_opcode (op
, 1);
3226 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3227 add_unwind_opcode (op
, 2);
3229 unwind
.frame_size
+= num_regs
* 12;
3233 /* Parse a directive saving VFP registers. */
3236 s_arm_unwind_save_vfp (void)
3242 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3250 demand_empty_rest_of_line ();
3255 op
= 0xb8 | (count
- 1);
3256 add_unwind_opcode (op
, 1);
3261 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3262 add_unwind_opcode (op
, 2);
3264 unwind
.frame_size
+= count
* 8 + 4;
3268 /* Parse a directive saving iWMMXt data registers. */
3271 s_arm_unwind_save_mmxwr (void)
3279 if (*input_line_pointer
== '{')
3280 input_line_pointer
++;
3284 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3288 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3293 as_tsktsk (_("register list not in ascending order"));
3296 if (*input_line_pointer
== '-')
3298 input_line_pointer
++;
3299 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3302 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3305 else if (reg
>= hi_reg
)
3307 as_bad (_("bad register range"));
3310 for (; reg
< hi_reg
; reg
++)
3314 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3316 if (*input_line_pointer
== '}')
3317 input_line_pointer
++;
3319 demand_empty_rest_of_line ();
3321 /* Generate any deferred opcodes because we're going to be looking at
3323 flush_pending_unwind ();
3325 for (i
= 0; i
< 16; i
++)
3327 if (mask
& (1 << i
))
3328 unwind
.frame_size
+= 8;
3331 /* Attempt to combine with a previous opcode. We do this because gcc
3332 likes to output separate unwind directives for a single block of
3334 if (unwind
.opcode_count
> 0)
3336 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3337 if ((i
& 0xf8) == 0xc0)
3340 /* Only merge if the blocks are contiguous. */
3343 if ((mask
& 0xfe00) == (1 << 9))
3345 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3346 unwind
.opcode_count
--;
3349 else if (i
== 6 && unwind
.opcode_count
>= 2)
3351 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3355 op
= 0xffff << (reg
- 1);
3357 || ((mask
& op
) == (1u << (reg
- 1))))
3359 op
= (1 << (reg
+ i
+ 1)) - 1;
3360 op
&= ~((1 << reg
) - 1);
3362 unwind
.opcode_count
-= 2;
3369 /* We want to generate opcodes in the order the registers have been
3370 saved, ie. descending order. */
3371 for (reg
= 15; reg
>= -1; reg
--)
3373 /* Save registers in blocks. */
3375 || !(mask
& (1 << reg
)))
3377 /* We found an unsaved reg. Generate opcodes to save the
3378 preceeding block. */
3384 op
= 0xc0 | (hi_reg
- 10);
3385 add_unwind_opcode (op
, 1);
3390 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3391 add_unwind_opcode (op
, 2);
3400 ignore_rest_of_line ();
3404 s_arm_unwind_save_mmxwcg (void)
3411 if (*input_line_pointer
== '{')
3412 input_line_pointer
++;
3416 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3420 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3426 as_tsktsk (_("register list not in ascending order"));
3429 if (*input_line_pointer
== '-')
3431 input_line_pointer
++;
3432 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3435 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3438 else if (reg
>= hi_reg
)
3440 as_bad (_("bad register range"));
3443 for (; reg
< hi_reg
; reg
++)
3447 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3449 if (*input_line_pointer
== '}')
3450 input_line_pointer
++;
3452 demand_empty_rest_of_line ();
3454 /* Generate any deferred opcodes because we're going to be looking at
3456 flush_pending_unwind ();
3458 for (reg
= 0; reg
< 16; reg
++)
3460 if (mask
& (1 << reg
))
3461 unwind
.frame_size
+= 4;
3464 add_unwind_opcode (op
, 2);
3467 ignore_rest_of_line ();
3471 /* Parse an unwind_save directive. */
3474 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED
)
3477 struct reg_entry
*reg
;
3478 bfd_boolean had_brace
= FALSE
;
3480 /* Figure out what sort of save we have. */
3481 peek
= input_line_pointer
;
3489 reg
= arm_reg_parse_multi (&peek
);
3493 as_bad (_("register expected"));
3494 ignore_rest_of_line ();
3503 as_bad (_("FPA .unwind_save does not take a register list"));
3504 ignore_rest_of_line ();
3507 s_arm_unwind_save_fpa (reg
->number
);
3510 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3511 case REG_TYPE_VFD
: s_arm_unwind_save_vfp (); return;
3512 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3513 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3516 as_bad (_(".unwind_save does not support this kind of register"));
3517 ignore_rest_of_line ();
3522 /* Parse an unwind_movsp directive. */
3525 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3530 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3533 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3534 ignore_rest_of_line ();
3537 demand_empty_rest_of_line ();
3539 if (reg
== REG_SP
|| reg
== REG_PC
)
3541 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3545 if (unwind
.fp_reg
!= REG_SP
)
3546 as_bad (_("unexpected .unwind_movsp directive"));
3548 /* Generate opcode to restore the value. */
3550 add_unwind_opcode (op
, 1);
3552 /* Record the information for later. */
3553 unwind
.fp_reg
= reg
;
3554 unwind
.fp_offset
= unwind
.frame_size
;
3555 unwind
.sp_restored
= 1;
3558 /* Parse an unwind_pad directive. */
3561 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3565 if (immediate_for_directive (&offset
) == FAIL
)
3570 as_bad (_("stack increment must be multiple of 4"));
3571 ignore_rest_of_line ();
3575 /* Don't generate any opcodes, just record the details for later. */
3576 unwind
.frame_size
+= offset
;
3577 unwind
.pending_offset
+= offset
;
3579 demand_empty_rest_of_line ();
3582 /* Parse an unwind_setfp directive. */
3585 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3591 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3592 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3595 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3597 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3599 as_bad (_("expected <reg>, <reg>"));
3600 ignore_rest_of_line ();
3604 /* Optional constant. */
3605 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3607 if (immediate_for_directive (&offset
) == FAIL
)
3613 demand_empty_rest_of_line ();
3615 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
3617 as_bad (_("register must be either sp or set by a previous"
3618 "unwind_movsp directive"));
3622 /* Don't generate any opcodes, just record the information for later. */
3623 unwind
.fp_reg
= fp_reg
;
3626 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3628 unwind
.fp_offset
-= offset
;
3631 /* Parse an unwind_raw directive. */
3634 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3637 /* This is an arbitrary limit. */
3638 unsigned char op
[16];
3642 if (exp
.X_op
== O_constant
3643 && skip_past_comma (&input_line_pointer
) != FAIL
)
3645 unwind
.frame_size
+= exp
.X_add_number
;
3649 exp
.X_op
= O_illegal
;
3651 if (exp
.X_op
!= O_constant
)
3653 as_bad (_("expected <offset>, <opcode>"));
3654 ignore_rest_of_line ();
3660 /* Parse the opcode. */
3665 as_bad (_("unwind opcode too long"));
3666 ignore_rest_of_line ();
3668 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3670 as_bad (_("invalid unwind opcode"));
3671 ignore_rest_of_line ();
3674 op
[count
++] = exp
.X_add_number
;
3676 /* Parse the next byte. */
3677 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3683 /* Add the opcode bytes in reverse order. */
3685 add_unwind_opcode (op
[count
], 1);
3687 demand_empty_rest_of_line ();
3691 /* Parse a .eabi_attribute directive. */
3694 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3697 bfd_boolean is_string
;
3704 if (exp
.X_op
!= O_constant
)
3707 tag
= exp
.X_add_number
;
3708 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3713 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3715 if (tag
== 32 || !is_string
)
3718 if (exp
.X_op
!= O_constant
)
3720 as_bad (_("expected numeric constant"));
3721 ignore_rest_of_line ();
3724 i
= exp
.X_add_number
;
3726 if (tag
== Tag_compatibility
3727 && skip_past_comma (&input_line_pointer
) == FAIL
)
3729 as_bad (_("expected comma"));
3730 ignore_rest_of_line ();
3735 skip_whitespace(input_line_pointer
);
3736 if (*input_line_pointer
!= '"')
3738 input_line_pointer
++;
3739 s
= input_line_pointer
;
3740 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3741 input_line_pointer
++;
3742 if (*input_line_pointer
!= '"')
3744 saved_char
= *input_line_pointer
;
3745 *input_line_pointer
= 0;
3753 if (tag
== Tag_compatibility
)
3754 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3756 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3758 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3762 *input_line_pointer
= saved_char
;
3763 input_line_pointer
++;
3765 demand_empty_rest_of_line ();
3768 as_bad (_("bad string constant"));
3769 ignore_rest_of_line ();
3772 as_bad (_("expected <tag> , <value>"));
3773 ignore_rest_of_line ();
3775 #endif /* OBJ_ELF */
3777 static void s_arm_arch (int);
3778 static void s_arm_cpu (int);
3779 static void s_arm_fpu (int);
3781 /* This table describes all the machine specific pseudo-ops the assembler
3782 has to support. The fields are:
3783 pseudo-op name without dot
3784 function to call to execute this pseudo-op
3785 Integer arg to pass to the function. */
3787 const pseudo_typeS md_pseudo_table
[] =
3789 /* Never called because '.req' does not start a line. */
3790 { "req", s_req
, 0 },
3791 /* Following two are likewise never called. */
3794 { "unreq", s_unreq
, 0 },
3795 { "bss", s_bss
, 0 },
3796 { "align", s_align
, 0 },
3797 { "arm", s_arm
, 0 },
3798 { "thumb", s_thumb
, 0 },
3799 { "code", s_code
, 0 },
3800 { "force_thumb", s_force_thumb
, 0 },
3801 { "thumb_func", s_thumb_func
, 0 },
3802 { "thumb_set", s_thumb_set
, 0 },
3803 { "even", s_even
, 0 },
3804 { "ltorg", s_ltorg
, 0 },
3805 { "pool", s_ltorg
, 0 },
3806 { "syntax", s_syntax
, 0 },
3807 { "cpu", s_arm_cpu
, 0 },
3808 { "arch", s_arm_arch
, 0 },
3809 { "fpu", s_arm_fpu
, 0 },
3811 { "word", s_arm_elf_cons
, 4 },
3812 { "long", s_arm_elf_cons
, 4 },
3813 { "rel31", s_arm_rel31
, 0 },
3814 { "fnstart", s_arm_unwind_fnstart
, 0 },
3815 { "fnend", s_arm_unwind_fnend
, 0 },
3816 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3817 { "personality", s_arm_unwind_personality
, 0 },
3818 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3819 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3820 { "save", s_arm_unwind_save
, 0 },
3821 { "movsp", s_arm_unwind_movsp
, 0 },
3822 { "pad", s_arm_unwind_pad
, 0 },
3823 { "setfp", s_arm_unwind_setfp
, 0 },
3824 { "unwind_raw", s_arm_unwind_raw
, 0 },
3825 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3829 { "extend", float_cons
, 'x' },
3830 { "ldouble", float_cons
, 'x' },
3831 { "packed", float_cons
, 'p' },
3835 /* Parser functions used exclusively in instruction operands. */
3837 /* Generic immediate-value read function for use in insn parsing.
3838 STR points to the beginning of the immediate (the leading #);
3839 VAL receives the value; if the value is outside [MIN, MAX]
3840 issue an error. PREFIX_OPT is true if the immediate prefix is
3844 parse_immediate (char **str
, int *val
, int min
, int max
,
3845 bfd_boolean prefix_opt
)
3848 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
3849 if (exp
.X_op
!= O_constant
)
3851 inst
.error
= _("constant expression required");
3855 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
3857 inst
.error
= _("immediate value out of range");
3861 *val
= exp
.X_add_number
;
3865 /* Less-generic immediate-value read function with the possibility of loading a
3866 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3867 instructions. Puts the result directly in inst.operands[i]. */
3870 parse_big_immediate (char **str
, int i
)
3875 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
3877 if (exp
.X_op
== O_constant
)
3878 inst
.operands
[i
].imm
= exp
.X_add_number
;
3879 else if (exp
.X_op
== O_big
3880 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
3881 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
3883 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
3884 /* Bignums have their least significant bits in
3885 generic_bignum[0]. Make sure we put 32 bits in imm and
3886 32 bits in reg, in a (hopefully) portable way. */
3887 assert (parts
!= 0);
3888 inst
.operands
[i
].imm
= 0;
3889 for (j
= 0; j
< parts
; j
++, idx
++)
3890 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
3891 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3892 inst
.operands
[i
].reg
= 0;
3893 for (j
= 0; j
< parts
; j
++, idx
++)
3894 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
3895 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3896 inst
.operands
[i
].regisimm
= 1;
3906 /* Returns the pseudo-register number of an FPA immediate constant,
3907 or FAIL if there isn't a valid constant here. */
3910 parse_fpa_immediate (char ** str
)
3912 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3918 /* First try and match exact strings, this is to guarantee
3919 that some formats will work even for cross assembly. */
3921 for (i
= 0; fp_const
[i
]; i
++)
3923 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
3927 *str
+= strlen (fp_const
[i
]);
3928 if (is_end_of_line
[(unsigned char) **str
])
3934 /* Just because we didn't get a match doesn't mean that the constant
3935 isn't valid, just that it is in a format that we don't
3936 automatically recognize. Try parsing it with the standard
3937 expression routines. */
3939 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
3941 /* Look for a raw floating point number. */
3942 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
3943 && is_end_of_line
[(unsigned char) *save_in
])
3945 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3947 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3949 if (words
[j
] != fp_values
[i
][j
])
3953 if (j
== MAX_LITTLENUMS
)
3961 /* Try and parse a more complex expression, this will probably fail
3962 unless the code uses a floating point prefix (eg "0f"). */
3963 save_in
= input_line_pointer
;
3964 input_line_pointer
= *str
;
3965 if (expression (&exp
) == absolute_section
3966 && exp
.X_op
== O_big
3967 && exp
.X_add_number
< 0)
3969 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3971 if (gen_to_words (words
, 5, (long) 15) == 0)
3973 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3975 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3977 if (words
[j
] != fp_values
[i
][j
])
3981 if (j
== MAX_LITTLENUMS
)
3983 *str
= input_line_pointer
;
3984 input_line_pointer
= save_in
;
3991 *str
= input_line_pointer
;
3992 input_line_pointer
= save_in
;
3993 inst
.error
= _("invalid FPA immediate expression");
3997 /* Returns 1 if a number has "quarter-precision" float format
3998 0baBbbbbbc defgh000 00000000 00000000. */
4001 is_quarter_float (unsigned imm
)
4003 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4004 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4007 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4008 0baBbbbbbc defgh000 00000000 00000000.
4009 The minus-zero case needs special handling, since it can't be encoded in the
4010 "quarter-precision" float format, but can nonetheless be loaded as an integer
4014 parse_qfloat_immediate (char **ccp
, int *immed
)
4017 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4019 skip_past_char (&str
, '#');
4021 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4023 unsigned fpword
= 0;
4026 /* Our FP word must be 32 bits (single-precision FP). */
4027 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4029 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4033 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
4046 /* Shift operands. */
4049 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4052 struct asm_shift_name
4055 enum shift_kind kind
;
4058 /* Third argument to parse_shift. */
4059 enum parse_shift_mode
4061 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4062 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4063 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4064 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4065 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4068 /* Parse a <shift> specifier on an ARM data processing instruction.
4069 This has three forms:
4071 (LSL|LSR|ASL|ASR|ROR) Rs
4072 (LSL|LSR|ASL|ASR|ROR) #imm
4075 Note that ASL is assimilated to LSL in the instruction encoding, and
4076 RRX to ROR #0 (which cannot be written as such). */
4079 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4081 const struct asm_shift_name
*shift_name
;
4082 enum shift_kind shift
;
4087 for (p
= *str
; ISALPHA (*p
); p
++)
4092 inst
.error
= _("shift expression expected");
4096 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4098 if (shift_name
== NULL
)
4100 inst
.error
= _("shift expression expected");
4104 shift
= shift_name
->kind
;
4108 case NO_SHIFT_RESTRICT
:
4109 case SHIFT_IMMEDIATE
: break;
4111 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4112 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4114 inst
.error
= _("'LSL' or 'ASR' required");
4119 case SHIFT_LSL_IMMEDIATE
:
4120 if (shift
!= SHIFT_LSL
)
4122 inst
.error
= _("'LSL' required");
4127 case SHIFT_ASR_IMMEDIATE
:
4128 if (shift
!= SHIFT_ASR
)
4130 inst
.error
= _("'ASR' required");
4138 if (shift
!= SHIFT_RRX
)
4140 /* Whitespace can appear here if the next thing is a bare digit. */
4141 skip_whitespace (p
);
4143 if (mode
== NO_SHIFT_RESTRICT
4144 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4146 inst
.operands
[i
].imm
= reg
;
4147 inst
.operands
[i
].immisreg
= 1;
4149 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4152 inst
.operands
[i
].shift_kind
= shift
;
4153 inst
.operands
[i
].shifted
= 1;
4158 /* Parse a <shifter_operand> for an ARM data processing instruction:
4161 #<immediate>, <rotate>
4165 where <shift> is defined by parse_shift above, and <rotate> is a
4166 multiple of 2 between 0 and 30. Validation of immediate operands
4167 is deferred to md_apply_fix. */
4170 parse_shifter_operand (char **str
, int i
)
4175 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4177 inst
.operands
[i
].reg
= value
;
4178 inst
.operands
[i
].isreg
= 1;
4180 /* parse_shift will override this if appropriate */
4181 inst
.reloc
.exp
.X_op
= O_constant
;
4182 inst
.reloc
.exp
.X_add_number
= 0;
4184 if (skip_past_comma (str
) == FAIL
)
4187 /* Shift operation on register. */
4188 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4191 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4194 if (skip_past_comma (str
) == SUCCESS
)
4196 /* #x, y -- ie explicit rotation by Y. */
4197 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4200 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4202 inst
.error
= _("constant expression expected");
4206 value
= expr
.X_add_number
;
4207 if (value
< 0 || value
> 30 || value
% 2 != 0)
4209 inst
.error
= _("invalid rotation");
4212 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4214 inst
.error
= _("invalid constant");
4218 /* Convert to decoded value. md_apply_fix will put it back. */
4219 inst
.reloc
.exp
.X_add_number
4220 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4221 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4224 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4225 inst
.reloc
.pc_rel
= 0;
4229 /* Parse all forms of an ARM address expression. Information is written
4230 to inst.operands[i] and/or inst.reloc.
4232 Preindexed addressing (.preind=1):
4234 [Rn, #offset] .reg=Rn .reloc.exp=offset
4235 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4236 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4237 .shift_kind=shift .reloc.exp=shift_imm
4239 These three may have a trailing ! which causes .writeback to be set also.
4241 Postindexed addressing (.postind=1, .writeback=1):
4243 [Rn], #offset .reg=Rn .reloc.exp=offset
4244 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4245 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4246 .shift_kind=shift .reloc.exp=shift_imm
4248 Unindexed addressing (.preind=0, .postind=0):
4250 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4254 [Rn]{!} shorthand for [Rn,#0]{!}
4255 =immediate .isreg=0 .reloc.exp=immediate
4256 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4258 It is the caller's responsibility to check for addressing modes not
4259 supported by the instruction, and to set inst.reloc.type. */
4262 parse_address (char **str
, int i
)
4267 if (skip_past_char (&p
, '[') == FAIL
)
4269 if (skip_past_char (&p
, '=') == FAIL
)
4271 /* bare address - translate to PC-relative offset */
4272 inst
.reloc
.pc_rel
= 1;
4273 inst
.operands
[i
].reg
= REG_PC
;
4274 inst
.operands
[i
].isreg
= 1;
4275 inst
.operands
[i
].preind
= 1;
4277 /* else a load-constant pseudo op, no special treatment needed here */
4279 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4286 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4288 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4291 inst
.operands
[i
].reg
= reg
;
4292 inst
.operands
[i
].isreg
= 1;
4294 if (skip_past_comma (&p
) == SUCCESS
)
4296 inst
.operands
[i
].preind
= 1;
4299 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4301 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4303 inst
.operands
[i
].imm
= reg
;
4304 inst
.operands
[i
].immisreg
= 1;
4306 if (skip_past_comma (&p
) == SUCCESS
)
4307 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4310 else if (skip_past_char (&p
, ':') == SUCCESS
)
4312 /* FIXME: '@' should be used here, but it's filtered out by generic
4313 code before we get to see it here. This may be subject to
4316 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4317 if (exp
.X_op
!= O_constant
)
4319 inst
.error
= _("alignment must be constant");
4322 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4323 inst
.operands
[i
].immisalign
= 1;
4324 /* Alignments are not pre-indexes. */
4325 inst
.operands
[i
].preind
= 0;
4329 if (inst
.operands
[i
].negative
)
4331 inst
.operands
[i
].negative
= 0;
4334 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4339 if (skip_past_char (&p
, ']') == FAIL
)
4341 inst
.error
= _("']' expected");
4345 if (skip_past_char (&p
, '!') == SUCCESS
)
4346 inst
.operands
[i
].writeback
= 1;
4348 else if (skip_past_comma (&p
) == SUCCESS
)
4350 if (skip_past_char (&p
, '{') == SUCCESS
)
4352 /* [Rn], {expr} - unindexed, with option */
4353 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4354 0, 255, TRUE
) == FAIL
)
4357 if (skip_past_char (&p
, '}') == FAIL
)
4359 inst
.error
= _("'}' expected at end of 'option' field");
4362 if (inst
.operands
[i
].preind
)
4364 inst
.error
= _("cannot combine index with option");
4372 inst
.operands
[i
].postind
= 1;
4373 inst
.operands
[i
].writeback
= 1;
4375 if (inst
.operands
[i
].preind
)
4377 inst
.error
= _("cannot combine pre- and post-indexing");
4382 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4384 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4386 /* We might be using the immediate for alignment already. If we
4387 are, OR the register number into the low-order bits. */
4388 if (inst
.operands
[i
].immisalign
)
4389 inst
.operands
[i
].imm
|= reg
;
4391 inst
.operands
[i
].imm
= reg
;
4392 inst
.operands
[i
].immisreg
= 1;
4394 if (skip_past_comma (&p
) == SUCCESS
)
4395 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4400 if (inst
.operands
[i
].negative
)
4402 inst
.operands
[i
].negative
= 0;
4405 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4411 /* If at this point neither .preind nor .postind is set, we have a
4412 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4413 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4415 inst
.operands
[i
].preind
= 1;
4416 inst
.reloc
.exp
.X_op
= O_constant
;
4417 inst
.reloc
.exp
.X_add_number
= 0;
4423 /* Miscellaneous. */
4425 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4426 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4428 parse_psr (char **str
)
4431 unsigned long psr_field
;
4432 const struct asm_psr
*psr
;
4435 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4436 feature for ease of use and backwards compatibility. */
4438 if (strncasecmp (p
, "SPSR", 4) == 0)
4439 psr_field
= SPSR_BIT
;
4440 else if (strncasecmp (p
, "CPSR", 4) == 0)
4447 while (ISALNUM (*p
) || *p
== '_');
4449 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4460 /* A suffix follows. */
4466 while (ISALNUM (*p
) || *p
== '_');
4468 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4472 psr_field
|= psr
->field
;
4477 goto error
; /* Garbage after "[CS]PSR". */
4479 psr_field
|= (PSR_c
| PSR_f
);
4485 inst
.error
= _("flag for {c}psr instruction expected");
4489 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4490 value suitable for splatting into the AIF field of the instruction. */
4493 parse_cps_flags (char **str
)
4502 case '\0': case ',':
4505 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4506 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4507 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4510 inst
.error
= _("unrecognized CPS flag");
4515 if (saw_a_flag
== 0)
4517 inst
.error
= _("missing CPS flags");
4525 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4526 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4529 parse_endian_specifier (char **str
)
4534 if (strncasecmp (s
, "BE", 2))
4536 else if (strncasecmp (s
, "LE", 2))
4540 inst
.error
= _("valid endian specifiers are be or le");
4544 if (ISALNUM (s
[2]) || s
[2] == '_')
4546 inst
.error
= _("valid endian specifiers are be or le");
4551 return little_endian
;
4554 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4555 value suitable for poking into the rotate field of an sxt or sxta
4556 instruction, or FAIL on error. */
4559 parse_ror (char **str
)
4564 if (strncasecmp (s
, "ROR", 3) == 0)
4568 inst
.error
= _("missing rotation field after comma");
4572 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
4577 case 0: *str
= s
; return 0x0;
4578 case 8: *str
= s
; return 0x1;
4579 case 16: *str
= s
; return 0x2;
4580 case 24: *str
= s
; return 0x3;
4583 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
4588 /* Parse a conditional code (from conds[] below). The value returned is in the
4589 range 0 .. 14, or FAIL. */
4591 parse_cond (char **str
)
4594 const struct asm_cond
*c
;
4597 while (ISALPHA (*q
))
4600 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
4603 inst
.error
= _("condition required");
4611 /* Parse an option for a barrier instruction. Returns the encoding for the
4614 parse_barrier (char **str
)
4617 const struct asm_barrier_opt
*o
;
4620 while (ISALPHA (*q
))
4623 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
4631 /* Parse the operands of a table branch instruction. Similar to a memory
4634 parse_tb (char **str
)
4639 if (skip_past_char (&p
, '[') == FAIL
)
4641 inst
.error
= _("'[' expected");
4645 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4647 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4650 inst
.operands
[0].reg
= reg
;
4652 if (skip_past_comma (&p
) == FAIL
)
4654 inst
.error
= _("',' expected");
4658 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4660 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4663 inst
.operands
[0].imm
= reg
;
4665 if (skip_past_comma (&p
) == SUCCESS
)
4667 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
4669 if (inst
.reloc
.exp
.X_add_number
!= 1)
4671 inst
.error
= _("invalid shift");
4674 inst
.operands
[0].shifted
= 1;
4677 if (skip_past_char (&p
, ']') == FAIL
)
4679 inst
.error
= _("']' expected");
4686 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4687 information on the types the operands can take and how they are encoded.
4688 Note particularly the abuse of ".regisimm" to signify a Neon register.
4689 Up to three operands may be read; this function handles setting the
4690 ".present" field for each operand itself.
4691 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4692 else returns FAIL. */
4695 parse_neon_mov (char **str
, int *which_operand
)
4697 int i
= *which_operand
, val
;
4698 enum arm_reg_type rtype
;
4700 struct neon_type_el optype
;
4702 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4704 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4705 inst
.operands
[i
].reg
= val
;
4706 inst
.operands
[i
].isscalar
= 1;
4707 inst
.operands
[i
].vectype
= optype
;
4708 inst
.operands
[i
++].present
= 1;
4710 if (skip_past_comma (&ptr
) == FAIL
)
4713 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4716 inst
.operands
[i
].reg
= val
;
4717 inst
.operands
[i
].isreg
= 1;
4718 inst
.operands
[i
].present
= 1;
4720 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4723 /* Cases 0, 1, 2, 3, 5 (D only). */
4724 if (skip_past_comma (&ptr
) == FAIL
)
4727 inst
.operands
[i
].reg
= val
;
4728 inst
.operands
[i
].isreg
= 1;
4729 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4730 inst
.operands
[i
].vectype
= optype
;
4731 inst
.operands
[i
++].present
= 1;
4733 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4735 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4736 inst
.operands
[i
-1].regisimm
= 1;
4737 inst
.operands
[i
].reg
= val
;
4738 inst
.operands
[i
].isreg
= 1;
4739 inst
.operands
[i
++].present
= 1;
4741 if (rtype
== REG_TYPE_NQ
)
4743 first_error (_("can't use Neon quad register here"));
4746 if (skip_past_comma (&ptr
) == FAIL
)
4748 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4750 inst
.operands
[i
].reg
= val
;
4751 inst
.operands
[i
].isreg
= 1;
4752 inst
.operands
[i
].present
= 1;
4754 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
4756 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4757 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4758 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4761 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
4763 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4764 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4765 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4768 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4771 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4772 Case 1: VMOV<c><q> <Dd>, <Dm> */
4773 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4776 inst
.operands
[i
].reg
= val
;
4777 inst
.operands
[i
].isreg
= 1;
4778 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4779 inst
.operands
[i
].vectype
= optype
;
4780 inst
.operands
[i
].present
= 1;
4784 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4788 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4791 inst
.operands
[i
].reg
= val
;
4792 inst
.operands
[i
].isreg
= 1;
4793 inst
.operands
[i
++].present
= 1;
4795 if (skip_past_comma (&ptr
) == FAIL
)
4798 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4800 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4801 inst
.operands
[i
].reg
= val
;
4802 inst
.operands
[i
].isscalar
= 1;
4803 inst
.operands
[i
].present
= 1;
4804 inst
.operands
[i
].vectype
= optype
;
4806 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4808 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4809 inst
.operands
[i
].reg
= val
;
4810 inst
.operands
[i
].isreg
= 1;
4811 inst
.operands
[i
++].present
= 1;
4813 if (skip_past_comma (&ptr
) == FAIL
)
4816 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFD
, NULL
, &optype
))
4819 first_error (_(reg_expected_msgs
[REG_TYPE_VFD
]));
4823 inst
.operands
[i
].reg
= val
;
4824 inst
.operands
[i
].isreg
= 1;
4825 inst
.operands
[i
].regisimm
= 1;
4826 inst
.operands
[i
].vectype
= optype
;
4827 inst
.operands
[i
].present
= 1;
4832 first_error (_("parse error"));
4836 /* Successfully parsed the operands. Update args. */
4842 first_error (_("expected comma"));
4846 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
4850 first_error (_("instruction cannot be conditionalized"));
4854 /* Matcher codes for parse_operands. */
4855 enum operand_parse_code
4857 OP_stop
, /* end of line */
4859 OP_RR
, /* ARM register */
4860 OP_RRnpc
, /* ARM register, not r15 */
4861 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
4862 OP_RRw
, /* ARM register, not r15, optional trailing ! */
4863 OP_RCP
, /* Coprocessor number */
4864 OP_RCN
, /* Coprocessor register */
4865 OP_RF
, /* FPA register */
4866 OP_RVS
, /* VFP single precision register */
4867 OP_RVD
, /* VFP double precision register (0..15) */
4868 OP_RND
, /* Neon double precision register (0..31) */
4869 OP_RNQ
, /* Neon quad precision register */
4870 OP_RNDQ
, /* Neon double or quad precision register */
4871 OP_RNSC
, /* Neon scalar D[X] */
4872 OP_RVC
, /* VFP control register */
4873 OP_RMF
, /* Maverick F register */
4874 OP_RMD
, /* Maverick D register */
4875 OP_RMFX
, /* Maverick FX register */
4876 OP_RMDX
, /* Maverick DX register */
4877 OP_RMAX
, /* Maverick AX register */
4878 OP_RMDS
, /* Maverick DSPSC register */
4879 OP_RIWR
, /* iWMMXt wR register */
4880 OP_RIWC
, /* iWMMXt wC register */
4881 OP_RIWG
, /* iWMMXt wCG register */
4882 OP_RXA
, /* XScale accumulator register */
4884 OP_REGLST
, /* ARM register list */
4885 OP_VRSLST
, /* VFP single-precision register list */
4886 OP_VRDLST
, /* VFP double-precision register list */
4887 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
4888 OP_NSTRLST
, /* Neon element/structure list */
4890 OP_NILO
, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4891 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
4892 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
4893 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
4894 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
4895 OP_VMOV
, /* Neon VMOV operands. */
4896 OP_RNDQ_IMVNb
,/* Neon D or Q reg, or immediate good for VMVN. */
4897 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
4899 OP_I0
, /* immediate zero */
4900 OP_I7
, /* immediate value 0 .. 7 */
4901 OP_I15
, /* 0 .. 15 */
4902 OP_I16
, /* 1 .. 16 */
4903 OP_I16z
, /* 0 .. 16 */
4904 OP_I31
, /* 0 .. 31 */
4905 OP_I31w
, /* 0 .. 31, optional trailing ! */
4906 OP_I32
, /* 1 .. 32 */
4907 OP_I32z
, /* 0 .. 32 */
4908 OP_I63
, /* 0 .. 63 */
4909 OP_I63s
, /* -64 .. 63 */
4910 OP_I64
, /* 1 .. 64 */
4911 OP_I64z
, /* 0 .. 64 */
4912 OP_I255
, /* 0 .. 255 */
4913 OP_Iffff
, /* 0 .. 65535 */
4915 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
4916 OP_I7b
, /* 0 .. 7 */
4917 OP_I15b
, /* 0 .. 15 */
4918 OP_I31b
, /* 0 .. 31 */
4920 OP_SH
, /* shifter operand */
4921 OP_ADDR
, /* Memory address expression (any mode) */
4922 OP_EXP
, /* arbitrary expression */
4923 OP_EXPi
, /* same, with optional immediate prefix */
4924 OP_EXPr
, /* same, with optional relocation suffix */
4926 OP_CPSF
, /* CPS flags */
4927 OP_ENDI
, /* Endianness specifier */
4928 OP_PSR
, /* CPSR/SPSR mask for msr */
4929 OP_COND
, /* conditional code */
4930 OP_TB
, /* Table branch. */
4932 OP_RRnpc_I0
, /* ARM register or literal 0 */
4933 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
4934 OP_RR_EXi
, /* ARM register or expression with imm prefix */
4935 OP_RF_IF
, /* FPA register or immediate */
4936 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
4938 /* Optional operands. */
4939 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
4940 OP_oI31b
, /* 0 .. 31 */
4941 OP_oI32b
, /* 1 .. 32 */
4942 OP_oIffffb
, /* 0 .. 65535 */
4943 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
4945 OP_oRR
, /* ARM register */
4946 OP_oRRnpc
, /* ARM register, not the PC */
4947 OP_oRND
, /* Optional Neon double precision register */
4948 OP_oRNQ
, /* Optional Neon quad precision register */
4949 OP_oRNDQ
, /* Optional Neon double or quad precision register */
4950 OP_oSHll
, /* LSL immediate */
4951 OP_oSHar
, /* ASR immediate */
4952 OP_oSHllar
, /* LSL or ASR immediate */
4953 OP_oROR
, /* ROR 0/8/16/24 */
4954 OP_oBARRIER
, /* Option argument for a barrier instruction. */
4956 OP_FIRST_OPTIONAL
= OP_oI7b
4959 /* Generic instruction operand parser. This does no encoding and no
4960 semantic validation; it merely squirrels values away in the inst
4961 structure. Returns SUCCESS or FAIL depending on whether the
4962 specified grammar matched. */
4964 parse_operands (char *str
, const unsigned char *pattern
)
4966 unsigned const char *upat
= pattern
;
4967 char *backtrack_pos
= 0;
4968 const char *backtrack_error
= 0;
4969 int i
, val
, backtrack_index
= 0;
4970 enum arm_reg_type rtype
;
4972 #define po_char_or_fail(chr) do { \
4973 if (skip_past_char (&str, chr) == FAIL) \
4977 #define po_reg_or_fail(regtype) do { \
4978 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4979 &inst.operands[i].vectype); \
4982 first_error (_(reg_expected_msgs[regtype])); \
4985 inst.operands[i].reg = val; \
4986 inst.operands[i].isreg = 1; \
4987 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4990 #define po_reg_or_goto(regtype, label) do { \
4991 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4992 &inst.operands[i].vectype); \
4996 inst.operands[i].reg = val; \
4997 inst.operands[i].isreg = 1; \
4998 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5001 #define po_imm_or_fail(min, max, popt) do { \
5002 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5004 inst.operands[i].imm = val; \
5007 #define po_scalar_or_goto(elsz, label) do { \
5008 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isscalar = 1; \
5015 #define po_misc_or_fail(expr) do { \
5020 skip_whitespace (str
);
5022 for (i
= 0; upat
[i
] != OP_stop
; i
++)
5024 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
5026 /* Remember where we are in case we need to backtrack. */
5027 assert (!backtrack_pos
);
5028 backtrack_pos
= str
;
5029 backtrack_error
= inst
.error
;
5030 backtrack_index
= i
;
5034 po_char_or_fail (',');
5042 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5043 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5044 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5045 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5046 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5047 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5049 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5050 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
5051 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5052 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5053 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5054 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5055 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5056 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5057 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5058 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5059 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5060 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5062 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5064 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5066 /* Neon scalar. Using an element size of 8 means that some invalid
5067 scalars are accepted here, so deal with those in later code. */
5068 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5070 /* WARNING: We can expand to two operands here. This has the potential
5071 to totally confuse the backtracking mechanism! It will be OK at
5072 least as long as we don't try to use optional args as well,
5076 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5078 skip_past_comma (&str
);
5079 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5082 /* Optional register operand was omitted. Unfortunately, it's in
5083 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5084 here (this is a bit grotty). */
5085 inst
.operands
[i
] = inst
.operands
[i
-1];
5086 inst
.operands
[i
-1].present
= 0;
5089 /* Immediate gets verified properly later, so accept any now. */
5090 po_imm_or_fail (INT_MIN
, INT_MAX
, TRUE
);
5096 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5099 po_imm_or_fail (0, 0, TRUE
);
5105 po_scalar_or_goto (8, try_rr
);
5108 po_reg_or_fail (REG_TYPE_RN
);
5114 po_scalar_or_goto (8, try_ndq
);
5117 po_reg_or_fail (REG_TYPE_NDQ
);
5123 po_scalar_or_goto (8, try_vfd
);
5126 po_reg_or_fail (REG_TYPE_VFD
);
5131 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5132 not careful then bad things might happen. */
5133 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5138 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5141 /* There's a possibility of getting a 64-bit immediate here, so
5142 we need special handling. */
5143 if (parse_big_immediate (&str
, i
) == FAIL
)
5145 inst
.error
= _("immediate value is out of range");
5153 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5156 po_imm_or_fail (0, 63, TRUE
);
5161 po_char_or_fail ('[');
5162 po_reg_or_fail (REG_TYPE_RN
);
5163 po_char_or_fail (']');
5167 po_reg_or_fail (REG_TYPE_RN
);
5168 if (skip_past_char (&str
, '!') == SUCCESS
)
5169 inst
.operands
[i
].writeback
= 1;
5173 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5174 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5175 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5176 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5177 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5178 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5179 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5180 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5181 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5182 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5183 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5184 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5185 case OP_Iffff
: po_imm_or_fail ( 0, 0xffff, FALSE
); break;
5187 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5189 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5190 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5192 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5193 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5194 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5196 /* Immediate variants */
5198 po_char_or_fail ('{');
5199 po_imm_or_fail (0, 255, TRUE
);
5200 po_char_or_fail ('}');
5204 /* The expression parser chokes on a trailing !, so we have
5205 to find it first and zap it. */
5208 while (*s
&& *s
!= ',')
5213 inst
.operands
[i
].writeback
= 1;
5215 po_imm_or_fail (0, 31, TRUE
);
5223 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5228 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5233 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5235 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5237 val
= parse_reloc (&str
);
5240 inst
.error
= _("unrecognized relocation suffix");
5243 else if (val
!= BFD_RELOC_UNUSED
)
5245 inst
.operands
[i
].imm
= val
;
5246 inst
.operands
[i
].hasreloc
= 1;
5251 /* Register or expression */
5252 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5253 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5255 /* Register or immediate */
5256 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5257 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5259 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5261 if (!is_immediate_prefix (*str
))
5264 val
= parse_fpa_immediate (&str
);
5267 /* FPA immediates are encoded as registers 8-15.
5268 parse_fpa_immediate has already applied the offset. */
5269 inst
.operands
[i
].reg
= val
;
5270 inst
.operands
[i
].isreg
= 1;
5273 /* Two kinds of register */
5276 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5277 if (rege
->type
!= REG_TYPE_MMXWR
5278 && rege
->type
!= REG_TYPE_MMXWC
5279 && rege
->type
!= REG_TYPE_MMXWCG
)
5281 inst
.error
= _("iWMMXt data or control register expected");
5284 inst
.operands
[i
].reg
= rege
->number
;
5285 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5290 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5291 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5292 case OP_oROR
: val
= parse_ror (&str
); break;
5293 case OP_PSR
: val
= parse_psr (&str
); break;
5294 case OP_COND
: val
= parse_cond (&str
); break;
5295 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5298 po_misc_or_fail (parse_tb (&str
));
5301 /* Register lists */
5303 val
= parse_reg_list (&str
);
5306 inst
.operands
[1].writeback
= 1;
5312 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5316 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5320 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5325 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5326 &inst
.operands
[i
].vectype
);
5329 /* Addressing modes */
5331 po_misc_or_fail (parse_address (&str
, i
));
5335 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5339 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
5343 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
5347 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
5351 as_fatal ("unhandled operand code %d", upat
[i
]);
5354 /* Various value-based sanity checks and shared operations. We
5355 do not signal immediate failures for the register constraints;
5356 this allows a syntax error to take precedence. */
5364 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
5365 inst
.error
= BAD_PC
;
5381 inst
.operands
[i
].imm
= val
;
5388 /* If we get here, this operand was successfully parsed. */
5389 inst
.operands
[i
].present
= 1;
5393 inst
.error
= BAD_ARGS
;
5398 /* The parse routine should already have set inst.error, but set a
5399 defaut here just in case. */
5401 inst
.error
= _("syntax error");
5405 /* Do not backtrack over a trailing optional argument that
5406 absorbed some text. We will only fail again, with the
5407 'garbage following instruction' error message, which is
5408 probably less helpful than the current one. */
5409 if (backtrack_index
== i
&& backtrack_pos
!= str
5410 && upat
[i
+1] == OP_stop
)
5413 inst
.error
= _("syntax error");
5417 /* Try again, skipping the optional argument at backtrack_pos. */
5418 str
= backtrack_pos
;
5419 inst
.error
= backtrack_error
;
5420 inst
.operands
[backtrack_index
].present
= 0;
5421 i
= backtrack_index
;
5425 /* Check that we have parsed all the arguments. */
5426 if (*str
!= '\0' && !inst
.error
)
5427 inst
.error
= _("garbage following instruction");
5429 return inst
.error
? FAIL
: SUCCESS
;
5432 #undef po_char_or_fail
5433 #undef po_reg_or_fail
5434 #undef po_reg_or_goto
5435 #undef po_imm_or_fail
5436 #undef po_scalar_or_fail
5438 /* Shorthand macro for instruction encoding functions issuing errors. */
5439 #define constraint(expr, err) do { \
5447 /* Functions for operand encoding. ARM, then Thumb. */
5449 #define rotate_left(v, n) (v << n | v >> (32 - n))
5451 /* If VAL can be encoded in the immediate field of an ARM instruction,
5452 return the encoded form. Otherwise, return FAIL. */
5455 encode_arm_immediate (unsigned int val
)
5459 for (i
= 0; i
< 32; i
+= 2)
5460 if ((a
= rotate_left (val
, i
)) <= 0xff)
5461 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
5466 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5467 return the encoded form. Otherwise, return FAIL. */
5469 encode_thumb32_immediate (unsigned int val
)
5476 for (i
= 1; i
<= 24; i
++)
5479 if ((val
& ~(0xff << i
)) == 0)
5480 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
5484 if (val
== ((a
<< 16) | a
))
5486 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
5490 if (val
== ((a
<< 16) | a
))
5491 return 0x200 | (a
>> 8);
5495 /* Encode a VFP SP or DP register number into inst.instruction. */
5498 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
5500 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
5503 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
5506 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
5509 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
5514 first_error (_("D register out of range for selected VFP version"));
5522 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
5526 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
5530 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
5534 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
5538 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
5542 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
5550 /* Encode a <shift> in an ARM-format instruction. The immediate,
5551 if any, is handled by md_apply_fix. */
5553 encode_arm_shift (int i
)
5555 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5556 inst
.instruction
|= SHIFT_ROR
<< 5;
5559 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5560 if (inst
.operands
[i
].immisreg
)
5562 inst
.instruction
|= SHIFT_BY_REG
;
5563 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
5566 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5571 encode_arm_shifter_operand (int i
)
5573 if (inst
.operands
[i
].isreg
)
5575 inst
.instruction
|= inst
.operands
[i
].reg
;
5576 encode_arm_shift (i
);
5579 inst
.instruction
|= INST_IMMEDIATE
;
5582 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5584 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
5586 assert (inst
.operands
[i
].isreg
);
5587 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5589 if (inst
.operands
[i
].preind
)
5593 inst
.error
= _("instruction does not accept preindexed addressing");
5596 inst
.instruction
|= PRE_INDEX
;
5597 if (inst
.operands
[i
].writeback
)
5598 inst
.instruction
|= WRITE_BACK
;
5601 else if (inst
.operands
[i
].postind
)
5603 assert (inst
.operands
[i
].writeback
);
5605 inst
.instruction
|= WRITE_BACK
;
5607 else /* unindexed - only for coprocessor */
5609 inst
.error
= _("instruction does not accept unindexed addressing");
5613 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
5614 && (((inst
.instruction
& 0x000f0000) >> 16)
5615 == ((inst
.instruction
& 0x0000f000) >> 12)))
5616 as_warn ((inst
.instruction
& LOAD_BIT
)
5617 ? _("destination register same as write-back base")
5618 : _("source register same as write-back base"));
5621 /* inst.operands[i] was set up by parse_address. Encode it into an
5622 ARM-format mode 2 load or store instruction. If is_t is true,
5623 reject forms that cannot be used with a T instruction (i.e. not
5626 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
5628 encode_arm_addr_mode_common (i
, is_t
);
5630 if (inst
.operands
[i
].immisreg
)
5632 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
5633 inst
.instruction
|= inst
.operands
[i
].imm
;
5634 if (!inst
.operands
[i
].negative
)
5635 inst
.instruction
|= INDEX_UP
;
5636 if (inst
.operands
[i
].shifted
)
5638 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5639 inst
.instruction
|= SHIFT_ROR
<< 5;
5642 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5643 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5647 else /* immediate offset in inst.reloc */
5649 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5650 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
5654 /* inst.operands[i] was set up by parse_address. Encode it into an
5655 ARM-format mode 3 load or store instruction. Reject forms that
5656 cannot be used with such instructions. If is_t is true, reject
5657 forms that cannot be used with a T instruction (i.e. not
5660 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
5662 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
5664 inst
.error
= _("instruction does not accept scaled register index");
5668 encode_arm_addr_mode_common (i
, is_t
);
5670 if (inst
.operands
[i
].immisreg
)
5672 inst
.instruction
|= inst
.operands
[i
].imm
;
5673 if (!inst
.operands
[i
].negative
)
5674 inst
.instruction
|= INDEX_UP
;
5676 else /* immediate offset in inst.reloc */
5678 inst
.instruction
|= HWOFFSET_IMM
;
5679 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5680 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
5684 /* inst.operands[i] was set up by parse_address. Encode it into an
5685 ARM-format instruction. Reject all forms which cannot be encoded
5686 into a coprocessor load/store instruction. If wb_ok is false,
5687 reject use of writeback; if unind_ok is false, reject use of
5688 unindexed addressing. If reloc_override is not 0, use it instead
5689 of BFD_ARM_CP_OFF_IMM. */
5692 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
5694 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5696 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
5698 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
5700 assert (!inst
.operands
[i
].writeback
);
5703 inst
.error
= _("instruction does not support unindexed addressing");
5706 inst
.instruction
|= inst
.operands
[i
].imm
;
5707 inst
.instruction
|= INDEX_UP
;
5711 if (inst
.operands
[i
].preind
)
5712 inst
.instruction
|= PRE_INDEX
;
5714 if (inst
.operands
[i
].writeback
)
5716 if (inst
.operands
[i
].reg
== REG_PC
)
5718 inst
.error
= _("pc may not be used with write-back");
5723 inst
.error
= _("instruction does not support writeback");
5726 inst
.instruction
|= WRITE_BACK
;
5730 inst
.reloc
.type
= reloc_override
;
5731 else if (thumb_mode
)
5732 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
5734 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
5738 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5739 Determine whether it can be performed with a move instruction; if
5740 it can, convert inst.instruction to that move instruction and
5741 return 1; if it can't, convert inst.instruction to a literal-pool
5742 load and return 0. If this is not a valid thing to do in the
5743 current context, set inst.error and return 1.
5745 inst.operands[i] describes the destination register. */
5748 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
5753 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
5757 if ((inst
.instruction
& tbit
) == 0)
5759 inst
.error
= _("invalid pseudo operation");
5762 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
5764 inst
.error
= _("constant expression expected");
5767 if (inst
.reloc
.exp
.X_op
== O_constant
)
5771 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
5773 /* This can be done with a mov(1) instruction. */
5774 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
5775 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
5781 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
5784 /* This can be done with a mov instruction. */
5785 inst
.instruction
&= LITERAL_MASK
;
5786 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
5787 inst
.instruction
|= value
& 0xfff;
5791 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
5794 /* This can be done with a mvn instruction. */
5795 inst
.instruction
&= LITERAL_MASK
;
5796 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
5797 inst
.instruction
|= value
& 0xfff;
5803 if (add_to_lit_pool () == FAIL
)
5805 inst
.error
= _("literal pool insertion failed");
5808 inst
.operands
[1].reg
= REG_PC
;
5809 inst
.operands
[1].isreg
= 1;
5810 inst
.operands
[1].preind
= 1;
5811 inst
.reloc
.pc_rel
= 1;
5812 inst
.reloc
.type
= (thumb_p
5813 ? BFD_RELOC_ARM_THUMB_OFFSET
5815 ? BFD_RELOC_ARM_HWLITERAL
5816 : BFD_RELOC_ARM_LITERAL
));
5820 /* Functions for instruction encoding, sorted by subarchitecture.
5821 First some generics; their names are taken from the conventional
5822 bit positions for register arguments in ARM format instructions. */
5832 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5838 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5839 inst
.instruction
|= inst
.operands
[1].reg
;
5845 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5846 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5852 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
5853 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5859 unsigned Rn
= inst
.operands
[2].reg
;
5860 /* Enforce restrictions on SWP instruction. */
5861 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
5862 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
5863 _("Rn must not overlap other operands"));
5864 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5865 inst
.instruction
|= inst
.operands
[1].reg
;
5866 inst
.instruction
|= Rn
<< 16;
5872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5873 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5874 inst
.instruction
|= inst
.operands
[2].reg
;
5880 inst
.instruction
|= inst
.operands
[0].reg
;
5881 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5882 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
5888 inst
.instruction
|= inst
.operands
[0].imm
;
5894 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5895 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
5898 /* ARM instructions, in alphabetical order by function name (except
5899 that wrapper functions appear immediately after the function they
5902 /* This is a pseudo-op of the form "adr rd, label" to be converted
5903 into a relative address of the form "add rd, pc, #label-.-8". */
5908 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5910 /* Frag hacking will turn this into a sub instruction if the offset turns
5911 out to be negative. */
5912 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5913 inst
.reloc
.pc_rel
= 1;
5914 inst
.reloc
.exp
.X_add_number
-= 8;
5917 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5918 into a relative address of the form:
5919 add rd, pc, #low(label-.-8)"
5920 add rd, rd, #high(label-.-8)" */
5925 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5927 /* Frag hacking will turn this into a sub instruction if the offset turns
5928 out to be negative. */
5929 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
5930 inst
.reloc
.pc_rel
= 1;
5931 inst
.size
= INSN_SIZE
* 2;
5932 inst
.reloc
.exp
.X_add_number
-= 8;
5938 if (!inst
.operands
[1].present
)
5939 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
5940 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5942 encode_arm_shifter_operand (2);
5948 if (inst
.operands
[0].present
)
5950 constraint ((inst
.instruction
& 0xf0) != 0x40
5951 && inst
.operands
[0].imm
!= 0xf,
5952 "bad barrier type");
5953 inst
.instruction
|= inst
.operands
[0].imm
;
5956 inst
.instruction
|= 0xf;
5962 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
5963 constraint (msb
> 32, _("bit-field extends past end of register"));
5964 /* The instruction encoding stores the LSB and MSB,
5965 not the LSB and width. */
5966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5967 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
5968 inst
.instruction
|= (msb
- 1) << 16;
5976 /* #0 in second position is alternative syntax for bfc, which is
5977 the same instruction but with REG_PC in the Rm field. */
5978 if (!inst
.operands
[1].isreg
)
5979 inst
.operands
[1].reg
= REG_PC
;
5981 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
5982 constraint (msb
> 32, _("bit-field extends past end of register"));
5983 /* The instruction encoding stores the LSB and MSB,
5984 not the LSB and width. */
5985 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5986 inst
.instruction
|= inst
.operands
[1].reg
;
5987 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5988 inst
.instruction
|= (msb
- 1) << 16;
5994 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
5995 _("bit-field extends past end of register"));
5996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5997 inst
.instruction
|= inst
.operands
[1].reg
;
5998 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5999 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6002 /* ARM V5 breakpoint instruction (argument parse)
6003 BKPT <16 bit unsigned immediate>
6004 Instruction is not conditional.
6005 The bit pattern given in insns[] has the COND_ALWAYS condition,
6006 and it is an error if the caller tried to override that. */
6011 /* Top 12 of 16 bits to bits 19:8. */
6012 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6014 /* Bottom 4 of 16 bits to bits 3:0. */
6015 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6019 encode_branch (int default_reloc
)
6021 if (inst
.operands
[0].hasreloc
)
6023 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6024 _("the only suffix valid here is '(plt)'"));
6025 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6029 inst
.reloc
.type
= default_reloc
;
6031 inst
.reloc
.pc_rel
= 1;
6038 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6039 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6042 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6049 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6051 if (inst
.cond
== COND_ALWAYS
)
6052 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6058 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6061 /* ARM V5 branch-link-exchange instruction (argument parse)
6062 BLX <target_addr> ie BLX(1)
6063 BLX{<condition>} <Rm> ie BLX(2)
6064 Unfortunately, there are two different opcodes for this mnemonic.
6065 So, the insns[].value is not used, and the code here zaps values
6066 into inst.instruction.
6067 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6072 if (inst
.operands
[0].isreg
)
6074 /* Arg is a register; the opcode provided by insns[] is correct.
6075 It is not illegal to do "blx pc", just useless. */
6076 if (inst
.operands
[0].reg
== REG_PC
)
6077 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6079 inst
.instruction
|= inst
.operands
[0].reg
;
6083 /* Arg is an address; this instruction cannot be executed
6084 conditionally, and the opcode must be adjusted. */
6085 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6086 inst
.instruction
= 0xfa000000;
6088 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6089 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6092 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6099 if (inst
.operands
[0].reg
== REG_PC
)
6100 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6102 inst
.instruction
|= inst
.operands
[0].reg
;
6106 /* ARM v5TEJ. Jump to Jazelle code. */
6111 if (inst
.operands
[0].reg
== REG_PC
)
6112 as_tsktsk (_("use of r15 in bxj is not really useful"));
6114 inst
.instruction
|= inst
.operands
[0].reg
;
6117 /* Co-processor data operation:
6118 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6119 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6123 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6124 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6125 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6126 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6127 inst
.instruction
|= inst
.operands
[4].reg
;
6128 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6134 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6135 encode_arm_shifter_operand (1);
6138 /* Transfer between coprocessor and ARM registers.
6139 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6144 No special properties. */
6149 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6150 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6151 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6152 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6153 inst
.instruction
|= inst
.operands
[4].reg
;
6154 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6157 /* Transfer between coprocessor register and pair of ARM registers.
6158 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6163 Two XScale instructions are special cases of these:
6165 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6166 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6168 Result unpredicatable if Rd or Rn is R15. */
6173 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6174 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6175 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6176 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6177 inst
.instruction
|= inst
.operands
[4].reg
;
6183 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6184 inst
.instruction
|= inst
.operands
[1].imm
;
6190 inst
.instruction
|= inst
.operands
[0].imm
;
6196 /* There is no IT instruction in ARM mode. We
6197 process it but do not generate code for it. */
6204 int base_reg
= inst
.operands
[0].reg
;
6205 int range
= inst
.operands
[1].imm
;
6207 inst
.instruction
|= base_reg
<< 16;
6208 inst
.instruction
|= range
;
6210 if (inst
.operands
[1].writeback
)
6211 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6213 if (inst
.operands
[0].writeback
)
6215 inst
.instruction
|= WRITE_BACK
;
6216 /* Check for unpredictable uses of writeback. */
6217 if (inst
.instruction
& LOAD_BIT
)
6219 /* Not allowed in LDM type 2. */
6220 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6221 && ((range
& (1 << REG_PC
)) == 0))
6222 as_warn (_("writeback of base register is UNPREDICTABLE"));
6223 /* Only allowed if base reg not in list for other types. */
6224 else if (range
& (1 << base_reg
))
6225 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6229 /* Not allowed for type 2. */
6230 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6231 as_warn (_("writeback of base register is UNPREDICTABLE"));
6232 /* Only allowed if base reg not in list, or first in list. */
6233 else if ((range
& (1 << base_reg
))
6234 && (range
& ((1 << base_reg
) - 1)))
6235 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6240 /* ARMv5TE load-consecutive (argument parse)
6249 constraint (inst
.operands
[0].reg
% 2 != 0,
6250 _("first destination register must be even"));
6251 constraint (inst
.operands
[1].present
6252 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6253 _("can only load two consecutive registers"));
6254 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6255 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6257 if (!inst
.operands
[1].present
)
6258 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6260 if (inst
.instruction
& LOAD_BIT
)
6262 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6263 register and the first register written; we have to diagnose
6264 overlap between the base and the second register written here. */
6266 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6267 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6268 as_warn (_("base register written back, and overlaps "
6269 "second destination register"));
6271 /* For an index-register load, the index register must not overlap the
6272 destination (even if not write-back). */
6273 else if (inst
.operands
[2].immisreg
6274 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6275 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6276 as_warn (_("index register overlaps destination register"));
6279 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6280 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6286 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6287 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6288 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6289 || inst
.operands
[1].negative
6290 /* This can arise if the programmer has written
6292 or if they have mistakenly used a register name as the last
6295 It is very difficult to distinguish between these two cases
6296 because "rX" might actually be a label. ie the register
6297 name has been occluded by a symbol of the same name. So we
6298 just generate a general 'bad addressing mode' type error
6299 message and leave it up to the programmer to discover the
6300 true cause and fix their mistake. */
6301 || (inst
.operands
[1].reg
== REG_PC
),
6304 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6305 || inst
.reloc
.exp
.X_add_number
!= 0,
6306 _("offset must be zero in ARM encoding"));
6308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6309 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6310 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6316 constraint (inst
.operands
[0].reg
% 2 != 0,
6317 _("even register required"));
6318 constraint (inst
.operands
[1].present
6319 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6320 _("can only load two consecutive registers"));
6321 /* If op 1 were present and equal to PC, this function wouldn't
6322 have been called in the first place. */
6323 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6326 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6333 if (!inst
.operands
[1].isreg
)
6334 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6336 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6342 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6344 if (inst
.operands
[1].preind
)
6346 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6347 inst
.reloc
.exp
.X_add_number
!= 0,
6348 _("this instruction requires a post-indexed address"));
6350 inst
.operands
[1].preind
= 0;
6351 inst
.operands
[1].postind
= 1;
6352 inst
.operands
[1].writeback
= 1;
6354 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6355 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
6358 /* Halfword and signed-byte load/store operations. */
6363 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6364 if (!inst
.operands
[1].isreg
)
6365 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
6367 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
6373 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6375 if (inst
.operands
[1].preind
)
6377 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6378 inst
.reloc
.exp
.X_add_number
!= 0,
6379 _("this instruction requires a post-indexed address"));
6381 inst
.operands
[1].preind
= 0;
6382 inst
.operands
[1].postind
= 1;
6383 inst
.operands
[1].writeback
= 1;
6385 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6386 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
6389 /* Co-processor register load/store.
6390 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6394 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6395 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6396 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6402 /* This restriction does not apply to mls (nor to mla in v6, but
6403 that's hard to detect at present). */
6404 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6405 && !(inst
.instruction
& 0x00400000))
6406 as_tsktsk (_("rd and rm should be different in mla"));
6408 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6409 inst
.instruction
|= inst
.operands
[1].reg
;
6410 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6411 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6419 encode_arm_shifter_operand (1);
6422 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6426 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6427 /* The value is in two pieces: 0:11, 16:19. */
6428 inst
.instruction
|= (inst
.operands
[1].imm
& 0x00000fff);
6429 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0000f000) << 4;
6435 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6436 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
6438 _("'CPSR' or 'SPSR' expected"));
6439 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6440 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
6443 /* Two possible forms:
6444 "{C|S}PSR_<field>, Rm",
6445 "{C|S}PSR_f, #expression". */
6450 inst
.instruction
|= inst
.operands
[0].imm
;
6451 if (inst
.operands
[1].isreg
)
6452 inst
.instruction
|= inst
.operands
[1].reg
;
6455 inst
.instruction
|= INST_IMMEDIATE
;
6456 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6457 inst
.reloc
.pc_rel
= 0;
6464 if (!inst
.operands
[2].present
)
6465 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
6466 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6467 inst
.instruction
|= inst
.operands
[1].reg
;
6468 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6470 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6471 as_tsktsk (_("rd and rm should be different in mul"));
6474 /* Long Multiply Parser
6475 UMULL RdLo, RdHi, Rm, Rs
6476 SMULL RdLo, RdHi, Rm, Rs
6477 UMLAL RdLo, RdHi, Rm, Rs
6478 SMLAL RdLo, RdHi, Rm, Rs. */
6483 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6485 inst
.instruction
|= inst
.operands
[2].reg
;
6486 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6488 /* rdhi, rdlo and rm must all be different. */
6489 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6490 || inst
.operands
[0].reg
== inst
.operands
[2].reg
6491 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
6492 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6498 if (inst
.operands
[0].present
)
6500 /* Architectural NOP hints are CPSR sets with no bits selected. */
6501 inst
.instruction
&= 0xf0000000;
6502 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
6506 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6507 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6508 Condition defaults to COND_ALWAYS.
6509 Error if Rd, Rn or Rm are R15. */
6514 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6515 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6516 inst
.instruction
|= inst
.operands
[2].reg
;
6517 if (inst
.operands
[3].present
)
6518 encode_arm_shift (3);
6521 /* ARM V6 PKHTB (Argument Parse). */
6526 if (!inst
.operands
[3].present
)
6528 /* If the shift specifier is omitted, turn the instruction
6529 into pkhbt rd, rm, rn. */
6530 inst
.instruction
&= 0xfff00010;
6531 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6532 inst
.instruction
|= inst
.operands
[1].reg
;
6533 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6537 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6538 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6539 inst
.instruction
|= inst
.operands
[2].reg
;
6540 encode_arm_shift (3);
6544 /* ARMv5TE: Preload-Cache
6548 Syntactically, like LDR with B=1, W=0, L=1. */
6553 constraint (!inst
.operands
[0].isreg
,
6554 _("'[' expected after PLD mnemonic"));
6555 constraint (inst
.operands
[0].postind
,
6556 _("post-indexed expression used in preload instruction"));
6557 constraint (inst
.operands
[0].writeback
,
6558 _("writeback used in preload instruction"));
6559 constraint (!inst
.operands
[0].preind
,
6560 _("unindexed addressing used in preload instruction"));
6561 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6564 /* ARMv7: PLI <addr_mode> */
6568 constraint (!inst
.operands
[0].isreg
,
6569 _("'[' expected after PLI mnemonic"));
6570 constraint (inst
.operands
[0].postind
,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst
.operands
[0].writeback
,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst
.operands
[0].preind
,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6577 inst
.instruction
&= ~PRE_INDEX
;
6583 inst
.operands
[1] = inst
.operands
[0];
6584 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
6585 inst
.operands
[0].isreg
= 1;
6586 inst
.operands
[0].writeback
= 1;
6587 inst
.operands
[0].reg
= REG_SP
;
6591 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6592 word at the specified address and the following word
6594 Unconditionally executed.
6595 Error if Rn is R15. */
6600 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6601 if (inst
.operands
[0].writeback
)
6602 inst
.instruction
|= WRITE_BACK
;
6605 /* ARM V6 ssat (argument parse). */
6610 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6611 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
6612 inst
.instruction
|= inst
.operands
[2].reg
;
6614 if (inst
.operands
[3].present
)
6615 encode_arm_shift (3);
6618 /* ARM V6 usat (argument parse). */
6623 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6624 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6625 inst
.instruction
|= inst
.operands
[2].reg
;
6627 if (inst
.operands
[3].present
)
6628 encode_arm_shift (3);
6631 /* ARM V6 ssat16 (argument parse). */
6636 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6637 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
6638 inst
.instruction
|= inst
.operands
[2].reg
;
6644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6645 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6646 inst
.instruction
|= inst
.operands
[2].reg
;
6649 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6650 preserving the other bits.
6652 setend <endian_specifier>, where <endian_specifier> is either
6658 if (inst
.operands
[0].imm
)
6659 inst
.instruction
|= 0x200;
6665 unsigned int Rm
= (inst
.operands
[1].present
6666 ? inst
.operands
[1].reg
6667 : inst
.operands
[0].reg
);
6669 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6670 inst
.instruction
|= Rm
;
6671 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
6673 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6674 inst
.instruction
|= SHIFT_BY_REG
;
6677 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6683 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
6684 inst
.reloc
.pc_rel
= 0;
6690 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
6691 inst
.reloc
.pc_rel
= 0;
6694 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6695 SMLAxy{cond} Rd,Rm,Rs,Rn
6696 SMLAWy{cond} Rd,Rm,Rs,Rn
6697 Error if any register is R15. */
6702 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6703 inst
.instruction
|= inst
.operands
[1].reg
;
6704 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6705 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6708 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6709 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6710 Error if any register is R15.
6711 Warning if Rdlo == Rdhi. */
6716 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6717 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6718 inst
.instruction
|= inst
.operands
[2].reg
;
6719 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6721 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6722 as_tsktsk (_("rdhi and rdlo must be different"));
6725 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6726 SMULxy{cond} Rd,Rm,Rs
6727 Error if any register is R15. */
6732 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6733 inst
.instruction
|= inst
.operands
[1].reg
;
6734 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6737 /* ARM V6 srs (argument parse). */
6742 inst
.instruction
|= inst
.operands
[0].imm
;
6743 if (inst
.operands
[0].writeback
)
6744 inst
.instruction
|= WRITE_BACK
;
6747 /* ARM V6 strex (argument parse). */
6752 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
6753 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
6754 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
6755 || inst
.operands
[2].negative
6756 /* See comment in do_ldrex(). */
6757 || (inst
.operands
[2].reg
== REG_PC
),
6760 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6761 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
6763 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6764 || inst
.reloc
.exp
.X_add_number
!= 0,
6765 _("offset must be zero in ARM encoding"));
6767 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6768 inst
.instruction
|= inst
.operands
[1].reg
;
6769 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6770 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6776 constraint (inst
.operands
[1].reg
% 2 != 0,
6777 _("even register required"));
6778 constraint (inst
.operands
[2].present
6779 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
6780 _("can only store two consecutive registers"));
6781 /* If op 2 were present and equal to PC, this function wouldn't
6782 have been called in the first place. */
6783 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
6785 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6786 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
6787 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
6790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6791 inst
.instruction
|= inst
.operands
[1].reg
;
6792 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6795 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6796 extends it to 32-bits, and adds the result to a value in another
6797 register. You can specify a rotation by 0, 8, 16, or 24 bits
6798 before extracting the 16-bit value.
6799 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6800 Condition defaults to COND_ALWAYS.
6801 Error if any register uses R15. */
6806 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6807 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6808 inst
.instruction
|= inst
.operands
[2].reg
;
6809 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
6814 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6821 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6822 inst
.instruction
|= inst
.operands
[1].reg
;
6823 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
6826 /* VFP instructions. In a logical order: SP variant first, monad
6827 before dyad, arithmetic then move then load/store. */
6830 do_vfp_sp_monadic (void)
6832 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6833 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6837 do_vfp_sp_dyadic (void)
6839 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6840 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6841 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6845 do_vfp_sp_compare_z (void)
6847 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6851 do_vfp_dp_sp_cvt (void)
6853 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6854 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6858 do_vfp_sp_dp_cvt (void)
6860 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6861 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6865 do_vfp_reg_from_sp (void)
6867 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6868 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6872 do_vfp_reg2_from_sp2 (void)
6874 constraint (inst
.operands
[2].imm
!= 2,
6875 _("only two consecutive VFP SP registers allowed here"));
6876 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6877 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6878 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6882 do_vfp_sp_from_reg (void)
6884 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
6885 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6889 do_vfp_sp2_from_reg2 (void)
6891 constraint (inst
.operands
[0].imm
!= 2,
6892 _("only two consecutive VFP SP registers allowed here"));
6893 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
6894 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6895 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6899 do_vfp_sp_ldst (void)
6901 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6902 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6906 do_vfp_dp_ldst (void)
6908 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6909 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6914 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
6916 if (inst
.operands
[0].writeback
)
6917 inst
.instruction
|= WRITE_BACK
;
6919 constraint (ldstm_type
!= VFP_LDSTMIA
,
6920 _("this addressing mode requires base-register writeback"));
6921 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6922 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
6923 inst
.instruction
|= inst
.operands
[1].imm
;
6927 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
6931 if (inst
.operands
[0].writeback
)
6932 inst
.instruction
|= WRITE_BACK
;
6934 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
6935 _("this addressing mode requires base-register writeback"));
6937 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6938 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6940 count
= inst
.operands
[1].imm
<< 1;
6941 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
6944 inst
.instruction
|= count
;
6948 do_vfp_sp_ldstmia (void)
6950 vfp_sp_ldstm (VFP_LDSTMIA
);
6954 do_vfp_sp_ldstmdb (void)
6956 vfp_sp_ldstm (VFP_LDSTMDB
);
6960 do_vfp_dp_ldstmia (void)
6962 vfp_dp_ldstm (VFP_LDSTMIA
);
6966 do_vfp_dp_ldstmdb (void)
6968 vfp_dp_ldstm (VFP_LDSTMDB
);
6972 do_vfp_xp_ldstmia (void)
6974 vfp_dp_ldstm (VFP_LDSTMIAX
);
6978 do_vfp_xp_ldstmdb (void)
6980 vfp_dp_ldstm (VFP_LDSTMDBX
);
6984 do_vfp_dp_rd_rm (void)
6986 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6987 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6991 do_vfp_dp_rn_rd (void)
6993 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
6994 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6998 do_vfp_dp_rd_rn (void)
7000 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7001 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7005 do_vfp_dp_rd_rn_rm (void)
7007 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7008 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7009 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7015 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7019 do_vfp_dp_rm_rd_rn (void)
7021 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7022 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7023 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7026 /* VFPv3 instructions. */
7028 do_vfp_sp_const (void)
7030 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7031 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7032 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7036 do_vfp_dp_const (void)
7038 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7039 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7040 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7044 vfp_conv (int srcsize
)
7046 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7047 inst
.instruction
|= (immbits
& 1) << 5;
7048 inst
.instruction
|= (immbits
>> 1);
7052 do_vfp_sp_conv_16 (void)
7054 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7059 do_vfp_dp_conv_16 (void)
7061 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7066 do_vfp_sp_conv_32 (void)
7068 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7073 do_vfp_dp_conv_32 (void)
7075 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7080 /* FPA instructions. Also in a logical order. */
7085 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7086 inst
.instruction
|= inst
.operands
[1].reg
;
7090 do_fpa_ldmstm (void)
7092 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7093 switch (inst
.operands
[1].imm
)
7095 case 1: inst
.instruction
|= CP_T_X
; break;
7096 case 2: inst
.instruction
|= CP_T_Y
; break;
7097 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7102 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7104 /* The instruction specified "ea" or "fd", so we can only accept
7105 [Rn]{!}. The instruction does not really support stacking or
7106 unstacking, so we have to emulate these by setting appropriate
7107 bits and offsets. */
7108 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7109 || inst
.reloc
.exp
.X_add_number
!= 0,
7110 _("this instruction does not support indexing"));
7112 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7113 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7115 if (!(inst
.instruction
& INDEX_UP
))
7116 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7118 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7120 inst
.operands
[2].preind
= 0;
7121 inst
.operands
[2].postind
= 1;
7125 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7128 /* iWMMXt instructions: strictly in alphabetical order. */
7131 do_iwmmxt_tandorc (void)
7133 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7137 do_iwmmxt_textrc (void)
7139 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7140 inst
.instruction
|= inst
.operands
[1].imm
;
7144 do_iwmmxt_textrm (void)
7146 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7147 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7148 inst
.instruction
|= inst
.operands
[2].imm
;
7152 do_iwmmxt_tinsr (void)
7154 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7155 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7156 inst
.instruction
|= inst
.operands
[2].imm
;
7160 do_iwmmxt_tmia (void)
7162 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7163 inst
.instruction
|= inst
.operands
[1].reg
;
7164 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7168 do_iwmmxt_waligni (void)
7170 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7171 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7172 inst
.instruction
|= inst
.operands
[2].reg
;
7173 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7177 do_iwmmxt_wmov (void)
7179 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7180 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7181 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7182 inst
.instruction
|= inst
.operands
[1].reg
;
7186 do_iwmmxt_wldstbh (void)
7189 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7190 inst
.reloc
.exp
.X_add_number
*= 4;
7192 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7194 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7195 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7199 do_iwmmxt_wldstw (void)
7201 /* RIWR_RIWC clears .isreg for a control register. */
7202 if (!inst
.operands
[0].isreg
)
7204 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7205 inst
.instruction
|= 0xf0000000;
7208 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7209 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7213 do_iwmmxt_wldstd (void)
7215 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7216 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7220 do_iwmmxt_wshufh (void)
7222 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7223 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7224 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7225 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7229 do_iwmmxt_wzero (void)
7231 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7232 inst
.instruction
|= inst
.operands
[0].reg
;
7233 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7234 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7237 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7238 operations first, then control, shift, and load/store. */
7240 /* Insns like "foo X,Y,Z". */
7243 do_mav_triple (void)
7245 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7246 inst
.instruction
|= inst
.operands
[1].reg
;
7247 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7250 /* Insns like "foo W,X,Y,Z".
7251 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7256 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7257 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7258 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7259 inst
.instruction
|= inst
.operands
[3].reg
;
7262 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7266 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7269 /* Maverick shift immediate instructions.
7270 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7271 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7276 int imm
= inst
.operands
[2].imm
;
7278 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7279 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7281 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7282 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7283 Bit 4 should be 0. */
7284 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
7286 inst
.instruction
|= imm
;
7289 /* XScale instructions. Also sorted arithmetic before move. */
7291 /* Xscale multiply-accumulate (argument parse)
7294 MIAxycc acc0,Rm,Rs. */
7299 inst
.instruction
|= inst
.operands
[1].reg
;
7300 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7303 /* Xscale move-accumulator-register (argument parse)
7305 MARcc acc0,RdLo,RdHi. */
7310 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7311 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7314 /* Xscale move-register-accumulator (argument parse)
7316 MRAcc RdLo,RdHi,acc0. */
7321 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
7322 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7323 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7326 /* Encoding functions relevant only to Thumb. */
7328 /* inst.operands[i] is a shifted-register operand; encode
7329 it into inst.instruction in the format used by Thumb32. */
/* NOTE(review): extraction dropped original lines 7342-7343 and
   7351-7356 here — apparently an else arm after the SHIFT_RRX case and
   the special handling between the range constraint and the generic
   field insertion (presumably the value==0 and value==32 cases — TODO
   confirm against upstream tc-arm.c before editing).  */
7332 encode_thumb32_shifted_operand (int i
)
/* Shift amount comes from the already-parsed constant expression.  */
7334 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
7335 unsigned int shift
= inst
.operands
[i
].shift_kind
;
/* Thumb32 data-processing encodings only take immediate shifts.  */
7337 constraint (inst
.operands
[i
].immisreg
,
7338 _("shift by register not allowed in thumb mode"));
/* Rm in the low bits.  */
7339 inst
.instruction
|= inst
.operands
[i
].reg
;
/* RRX is encoded as ROR with a zero shift amount.  */
7340 if (shift
== SHIFT_RRX
)
7341 inst
.instruction
|= SHIFT_ROR
<< 4;
7344 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7345 _("expression too complex"));
/* LSL and ROR cannot encode a shift of 32.  */
7347 constraint (value
> 32
7348 || (value
== 32 && (shift
== SHIFT_LSL
7349 || shift
== SHIFT_ROR
)),
7350 _("shift expression is too large"));
7354 else if (value
== 32)
/* Shift type in bits 4-5; amount split across bits 12-14 and 6-7.  */
7357 inst
.instruction
|= shift
<< 4;
7358 inst
.instruction
|= (value
& 0x1c) << 10;
7359 inst
.instruction
|= (value
& 0x03) << 6;
7364 /* inst.operands[i] was set up by parse_address. Encode it into a
7365 Thumb32 format load or store instruction. Reject forms that cannot
7366 be used with such instructions. If is_t is true, reject forms that
7367 cannot be used with a T instruction; if is_d is true, reject forms
7368 that cannot be used with a D instruction. */
7371 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
7373 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7375 constraint (!inst
.operands
[i
].isreg
,
7376 _("Instruction does not support =N addresses"));
7378 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7379 if (inst
.operands
[i
].immisreg
)
7381 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
7382 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
7383 constraint (inst
.operands
[i
].negative
,
7384 _("Thumb does not support negative register indexing"));
7385 constraint (inst
.operands
[i
].postind
,
7386 _("Thumb does not support register post-indexing"));
7387 constraint (inst
.operands
[i
].writeback
,
7388 _("Thumb does not support register indexing with writeback"));
7389 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
7390 _("Thumb supports only LSL in shifted register indexing"));
7392 inst
.instruction
|= inst
.operands
[i
].imm
;
7393 if (inst
.operands
[i
].shifted
)
7395 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7396 _("expression too complex"));
7397 constraint (inst
.reloc
.exp
.X_add_number
< 0
7398 || inst
.reloc
.exp
.X_add_number
> 3,
7399 _("shift out of range"));
7400 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7402 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7404 else if (inst
.operands
[i
].preind
)
7406 constraint (is_pc
&& inst
.operands
[i
].writeback
,
7407 _("cannot use writeback with PC-relative addressing"));
7408 constraint (is_t
&& inst
.operands
[i
].writeback
,
7409 _("cannot use writeback with this instruction"));
7413 inst
.instruction
|= 0x01000000;
7414 if (inst
.operands
[i
].writeback
)
7415 inst
.instruction
|= 0x00200000;
7419 inst
.instruction
|= 0x00000c00;
7420 if (inst
.operands
[i
].writeback
)
7421 inst
.instruction
|= 0x00000100;
7423 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7425 else if (inst
.operands
[i
].postind
)
7427 assert (inst
.operands
[i
].writeback
);
7428 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
7429 constraint (is_t
, _("cannot use post-indexing with this instruction"));
7432 inst
.instruction
|= 0x00200000;
7434 inst
.instruction
|= 0x00000900;
7435 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7437 else /* unindexed - only for coprocessor */
7438 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds the canonical one.
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004: per ARM ARM the 32-bit
				hints are NOP.W=f3af8000 ... SEV.W=f3af8004 */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: 16-bit opcode for each mnemonic code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: 32-bit opcode for each mnemonic code.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Bit 20 of the 32-bit encoding is the S (set-flags) bit.  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7545 /* Thumb instruction encoders, in alphabetical order. */
7549 do_t_add_sub_w (void)
7553 Rd
= inst
.operands
[0].reg
;
7554 Rn
= inst
.operands
[1].reg
;
7556 constraint (Rd
== 15, _("PC not allowed as destination"));
7557 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
7558 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
7561 /* Parse an add or subtract instruction. We get here with inst.instruction
7562 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7569 Rd
= inst
.operands
[0].reg
;
7570 Rs
= (inst
.operands
[1].present
7571 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7572 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7580 flags
= (inst
.instruction
== T_MNEM_adds
7581 || inst
.instruction
== T_MNEM_subs
);
7583 narrow
= (current_it_mask
== 0);
7585 narrow
= (current_it_mask
!= 0);
7586 if (!inst
.operands
[2].isreg
)
7589 if (inst
.size_req
!= 4)
7593 add
= (inst
.instruction
== T_MNEM_add
7594 || inst
.instruction
== T_MNEM_adds
);
7595 /* Attempt to use a narrow opcode, with relaxation if
7597 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
7598 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
7599 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
7600 opcode
= T_MNEM_add_sp
;
7601 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
7602 opcode
= T_MNEM_add_pc
;
7603 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
7606 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
7608 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
7612 inst
.instruction
= THUMB_OP16(opcode
);
7613 inst
.instruction
|= (Rd
<< 4) | Rs
;
7614 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7615 if (inst
.size_req
!= 2)
7616 inst
.relax
= opcode
;
7619 constraint (inst
.size_req
== 2, BAD_HIREG
);
7621 if (inst
.size_req
== 4
7622 || (inst
.size_req
!= 2 && !opcode
))
7624 /* ??? Convert large immediates to addw/subw. */
7625 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7626 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7627 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7628 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7629 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7634 Rn
= inst
.operands
[2].reg
;
7635 /* See if we can do this with a 16-bit instruction. */
7636 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
7638 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7643 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
7644 || inst
.instruction
== T_MNEM_add
)
7647 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7651 if (inst
.instruction
== T_MNEM_add
)
7655 inst
.instruction
= T_OPCODE_ADD_HI
;
7656 inst
.instruction
|= (Rd
& 8) << 4;
7657 inst
.instruction
|= (Rd
& 7);
7658 inst
.instruction
|= Rn
<< 3;
7661 /* ... because addition is commutative! */
7664 inst
.instruction
= T_OPCODE_ADD_HI
;
7665 inst
.instruction
|= (Rd
& 8) << 4;
7666 inst
.instruction
|= (Rd
& 7);
7667 inst
.instruction
|= Rs
<< 3;
7672 /* If we get here, it can't be done in 16 bits. */
7673 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
7674 _("shift must be constant"));
7675 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7676 inst
.instruction
|= Rd
<< 8;
7677 inst
.instruction
|= Rs
<< 16;
7678 encode_thumb32_shifted_operand (2);
7683 constraint (inst
.instruction
== T_MNEM_adds
7684 || inst
.instruction
== T_MNEM_subs
,
7687 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
7689 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
7690 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
7693 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7695 inst
.instruction
|= (Rd
<< 4) | Rs
;
7696 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7700 Rn
= inst
.operands
[2].reg
;
7701 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
7703 /* We now have Rd, Rs, and Rn set to registers. */
7704 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7706 /* Can't do this for SUB. */
7707 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
7708 inst
.instruction
= T_OPCODE_ADD_HI
;
7709 inst
.instruction
|= (Rd
& 8) << 4;
7710 inst
.instruction
|= (Rd
& 7);
7712 inst
.instruction
|= Rn
<< 3;
7714 inst
.instruction
|= Rs
<< 3;
7716 constraint (1, _("dest must overlap one source register"));
7720 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7721 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
7722 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7730 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
7732 /* Defer to section relaxation. */
7733 inst
.relax
= inst
.instruction
;
7734 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7735 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7737 else if (unified_syntax
&& inst
.size_req
!= 2)
7739 /* Generate a 32-bit opcode. */
7740 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7741 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7742 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
7743 inst
.reloc
.pc_rel
= 1;
7747 /* Generate a 16-bit opcode. */
7748 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7749 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7750 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
7751 inst
.reloc
.pc_rel
= 1;
7753 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7757 /* Arithmetic instructions for which there is just one 16-bit
7758 instruction encoding, and it allows only two low registers.
7759 For maximal compatibility with ARM syntax, we allow three register
7760 operands even when Thumb-32 instructions are not available, as long
7761 as the first two are identical. For instance, both "sbc r0,r1" and
7762 "sbc r0,r0,r1" are allowed. */
7768 Rd
= inst
.operands
[0].reg
;
7769 Rs
= (inst
.operands
[1].present
7770 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7771 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7772 Rn
= inst
.operands
[2].reg
;
7776 if (!inst
.operands
[2].isreg
)
7778 /* For an immediate, we always generate a 32-bit opcode;
7779 section relaxation will shrink it later if possible. */
7780 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7781 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7782 inst
.instruction
|= Rd
<< 8;
7783 inst
.instruction
|= Rs
<< 16;
7784 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7790 /* See if we can do this with a 16-bit instruction. */
7791 if (THUMB_SETS_FLAGS (inst
.instruction
))
7792 narrow
= current_it_mask
== 0;
7794 narrow
= current_it_mask
!= 0;
7796 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7798 if (inst
.operands
[2].shifted
)
7800 if (inst
.size_req
== 4)
7806 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7807 inst
.instruction
|= Rd
;
7808 inst
.instruction
|= Rn
<< 3;
7812 /* If we get here, it can't be done in 16 bits. */
7813 constraint (inst
.operands
[2].shifted
7814 && inst
.operands
[2].immisreg
,
7815 _("shift must be constant"));
7816 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7817 inst
.instruction
|= Rd
<< 8;
7818 inst
.instruction
|= Rs
<< 16;
7819 encode_thumb32_shifted_operand (2);
7824 /* On its face this is a lie - the instruction does set the
7825 flags. However, the only supported mnemonic in this mode
7827 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7829 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7830 _("unshifted register required"));
7831 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7832 constraint (Rd
!= Rs
,
7833 _("dest and source1 must be the same register"));
7835 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7836 inst
.instruction
|= Rd
;
7837 inst
.instruction
|= Rn
<< 3;
7841 /* Similarly, but for instructions where the arithmetic operation is
7842 commutative, so we can allow either of them to be different from
7843 the destination operand in a 16-bit instruction. For instance, all
7844 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7851 Rd
= inst
.operands
[0].reg
;
7852 Rs
= (inst
.operands
[1].present
7853 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7854 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7855 Rn
= inst
.operands
[2].reg
;
7859 if (!inst
.operands
[2].isreg
)
7861 /* For an immediate, we always generate a 32-bit opcode;
7862 section relaxation will shrink it later if possible. */
7863 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7864 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7865 inst
.instruction
|= Rd
<< 8;
7866 inst
.instruction
|= Rs
<< 16;
7867 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7873 /* See if we can do this with a 16-bit instruction. */
7874 if (THUMB_SETS_FLAGS (inst
.instruction
))
7875 narrow
= current_it_mask
== 0;
7877 narrow
= current_it_mask
!= 0;
7879 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7881 if (inst
.operands
[2].shifted
)
7883 if (inst
.size_req
== 4)
7890 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7891 inst
.instruction
|= Rd
;
7892 inst
.instruction
|= Rn
<< 3;
7897 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7898 inst
.instruction
|= Rd
;
7899 inst
.instruction
|= Rs
<< 3;
7904 /* If we get here, it can't be done in 16 bits. */
7905 constraint (inst
.operands
[2].shifted
7906 && inst
.operands
[2].immisreg
,
7907 _("shift must be constant"));
7908 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7909 inst
.instruction
|= Rd
<< 8;
7910 inst
.instruction
|= Rs
<< 16;
7911 encode_thumb32_shifted_operand (2);
7916 /* On its face this is a lie - the instruction does set the
7917 flags. However, the only supported mnemonic in this mode
7919 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7921 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7922 _("unshifted register required"));
7923 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7925 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7926 inst
.instruction
|= Rd
;
7929 inst
.instruction
|= Rn
<< 3;
7931 inst
.instruction
|= Rs
<< 3;
7933 constraint (1, _("dest must overlap one source register"));
7940 if (inst
.operands
[0].present
)
7942 constraint ((inst
.instruction
& 0xf0) != 0x40
7943 && inst
.operands
[0].imm
!= 0xf,
7944 "bad barrier type");
7945 inst
.instruction
|= inst
.operands
[0].imm
;
7948 inst
.instruction
|= 0xf;
7954 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
7955 constraint (msb
> 32, _("bit-field extends past end of register"));
7956 /* The instruction encoding stores the LSB and MSB,
7957 not the LSB and width. */
7958 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7959 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
7960 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
7961 inst
.instruction
|= msb
- 1;
7969 /* #0 in second position is alternative syntax for bfc, which is
7970 the same instruction but with REG_PC in the Rm field. */
7971 if (!inst
.operands
[1].isreg
)
7972 inst
.operands
[1].reg
= REG_PC
;
7974 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
7975 constraint (msb
> 32, _("bit-field extends past end of register"));
7976 /* The instruction encoding stores the LSB and MSB,
7977 not the LSB and width. */
7978 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7979 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7980 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7981 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7982 inst
.instruction
|= msb
- 1;
7988 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
7989 _("bit-field extends past end of register"));
7990 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7991 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7992 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7993 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7994 inst
.instruction
|= inst
.operands
[3].imm
- 1;
7997 /* ARM V5 Thumb BLX (argument parse)
7998 BLX <target_addr> which is BLX(1)
7999 BLX <Rm> which is BLX(2)
8000 Unfortunately, there are two different opcodes for this mnemonic.
8001 So, the insns[].value is not used, and the code here zaps values
8002 into inst.instruction.
8004 ??? How to take advantage of the additional two bits of displacement
8005 available in Thumb32 mode? Need new relocation? */
8010 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8011 if (inst
.operands
[0].isreg
)
8012 /* We have a register, so this is BLX(2). */
8013 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8016 /* No register. This must be BLX(1). */
8017 inst
.instruction
= 0xf000e800;
8019 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8020 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8023 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
8024 inst
.reloc
.pc_rel
= 1;
8034 if (current_it_mask
)
8036 /* Conditional branches inside IT blocks are encoded as unconditional
8039 /* A branch must be the last instruction in an IT block. */
8040 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
8045 if (cond
!= COND_ALWAYS
)
8046 opcode
= T_MNEM_bcond
;
8048 opcode
= inst
.instruction
;
8050 if (unified_syntax
&& inst
.size_req
== 4)
8052 inst
.instruction
= THUMB_OP32(opcode
);
8053 if (cond
== COND_ALWAYS
)
8054 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
8057 assert (cond
!= 0xF);
8058 inst
.instruction
|= cond
<< 22;
8059 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
8064 inst
.instruction
= THUMB_OP16(opcode
);
8065 if (cond
== COND_ALWAYS
)
8066 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
8069 inst
.instruction
|= cond
<< 8;
8070 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
8072 /* Allow section relaxation. */
8073 if (unified_syntax
&& inst
.size_req
!= 2)
8074 inst
.relax
= opcode
;
8077 inst
.reloc
.pc_rel
= 1;
8083 constraint (inst
.cond
!= COND_ALWAYS
,
8084 _("instruction is always unconditional"));
8085 if (inst
.operands
[0].present
)
8087 constraint (inst
.operands
[0].imm
> 255,
8088 _("immediate value out of range"));
8089 inst
.instruction
|= inst
.operands
[0].imm
;
8094 do_t_branch23 (void)
8096 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8097 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8098 inst
.reloc
.pc_rel
= 1;
8100 /* If the destination of the branch is a defined symbol which does not have
8101 the THUMB_FUNC attribute, then we must be calling a function which has
8102 the (interfacearm) attribute. We look for the Thumb entry point to that
8103 function and change the branch to refer to that function instead. */
8104 if ( inst
.reloc
.exp
.X_op
== O_symbol
8105 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8106 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8107 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8108 inst
.reloc
.exp
.X_add_symbol
=
8109 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
8115 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8116 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8117 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8118 should cause the alignment to be checked once it is known. This is
8119 because BX PC only works if the instruction is word aligned. */
8125 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8126 if (inst
.operands
[0].reg
== REG_PC
)
8127 as_tsktsk (_("use of r15 in bxj is not really useful"));
8129 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8135 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8136 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8137 inst
.instruction
|= inst
.operands
[1].reg
;
8143 constraint (current_it_mask
, BAD_NOT_IT
);
8144 inst
.instruction
|= inst
.operands
[0].imm
;
8150 constraint (current_it_mask
, BAD_NOT_IT
);
8152 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8153 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8155 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8156 inst
.instruction
= 0xf3af8000;
8157 inst
.instruction
|= imod
<< 9;
8158 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8159 if (inst
.operands
[1].present
)
8160 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8164 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8165 && (inst
.operands
[0].imm
& 4),
8166 _("selected processor does not support 'A' form "
8167 "of this instruction"));
8168 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
8169 _("Thumb does not support the 2-argument "
8170 "form of this instruction"));
8171 inst
.instruction
|= inst
.operands
[0].imm
;
8175 /* THUMB CPY instruction (argument parse). */
8180 if (inst
.size_req
== 4)
8182 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
8183 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8184 inst
.instruction
|= inst
.operands
[1].reg
;
8188 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8189 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8190 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8197 constraint (current_it_mask
, BAD_NOT_IT
);
8198 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8199 inst
.instruction
|= inst
.operands
[0].reg
;
8200 inst
.reloc
.pc_rel
= 1;
8201 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
8207 inst
.instruction
|= inst
.operands
[0].imm
;
8213 if (!inst
.operands
[1].present
)
8214 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8215 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8216 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8217 inst
.instruction
|= inst
.operands
[2].reg
;
8223 if (unified_syntax
&& inst
.size_req
== 4)
8224 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8226 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8232 unsigned int cond
= inst
.operands
[0].imm
;
8234 constraint (current_it_mask
, BAD_NOT_IT
);
8235 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
8238 /* If the condition is a negative condition, invert the mask. */
8239 if ((cond
& 0x1) == 0x0)
8241 unsigned int mask
= inst
.instruction
& 0x000f;
8243 if ((mask
& 0x7) == 0)
8244 /* no conversion needed */;
8245 else if ((mask
& 0x3) == 0)
8247 else if ((mask
& 0x1) == 0)
8252 inst
.instruction
&= 0xfff0;
8253 inst
.instruction
|= mask
;
8256 inst
.instruction
|= cond
<< 4;
8262 /* This really doesn't seem worth it. */
8263 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8264 _("expression too complex"));
8265 constraint (inst
.operands
[1].writeback
,
8266 _("Thumb load/store multiple does not support {reglist}^"));
8270 /* See if we can use a 16-bit instruction. */
8271 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
8272 && inst
.size_req
!= 4
8273 && inst
.operands
[0].reg
<= 7
8274 && !(inst
.operands
[1].imm
& ~0xff)
8275 && (inst
.instruction
== T_MNEM_stmia
8276 ? inst
.operands
[0].writeback
8277 : (inst
.operands
[0].writeback
8278 == !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))))
8280 if (inst
.instruction
== T_MNEM_stmia
8281 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8282 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8283 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8284 inst
.operands
[0].reg
);
8286 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8287 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8288 inst
.instruction
|= inst
.operands
[1].imm
;
8292 if (inst
.operands
[1].imm
& (1 << 13))
8293 as_warn (_("SP should not be in register list"));
8294 if (inst
.instruction
== T_MNEM_stmia
)
8296 if (inst
.operands
[1].imm
& (1 << 15))
8297 as_warn (_("PC should not be in register list"));
8298 if (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8299 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8300 inst
.operands
[0].reg
);
8304 if (inst
.operands
[1].imm
& (1 << 14)
8305 && inst
.operands
[1].imm
& (1 << 15))
8306 as_warn (_("LR and PC should not both be in register list"));
8307 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8308 && inst
.operands
[0].writeback
)
8309 as_warn (_("base register should not be in register list "
8310 "when written back"));
8312 if (inst
.instruction
< 0xffff)
8313 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8314 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8315 inst
.instruction
|= inst
.operands
[1].imm
;
8316 if (inst
.operands
[0].writeback
)
8317 inst
.instruction
|= WRITE_BACK
;
8322 constraint (inst
.operands
[0].reg
> 7
8323 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
8324 if (inst
.instruction
== T_MNEM_stmia
)
8326 if (!inst
.operands
[0].writeback
)
8327 as_warn (_("this instruction will write back the base register"));
8328 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8329 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8330 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8331 inst
.operands
[0].reg
);
8335 if (!inst
.operands
[0].writeback
8336 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8337 as_warn (_("this instruction will write back the base register"));
8338 else if (inst
.operands
[0].writeback
8339 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8340 as_warn (_("this instruction will not write back the base register"));
8343 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8344 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8345 inst
.instruction
|= inst
.operands
[1].imm
;
8352 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8353 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8354 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8355 || inst
.operands
[1].negative
,
8358 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8359 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8360 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
8366 if (!inst
.operands
[1].present
)
8368 constraint (inst
.operands
[0].reg
== REG_LR
,
8369 _("r14 not allowed as first register "
8370 "when second register is omitted"));
8371 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8373 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
8376 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8377 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8378 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8384 unsigned long opcode
;
8387 opcode
= inst
.instruction
;
8390 if (!inst
.operands
[1].isreg
)
8392 if (opcode
<= 0xffff)
8393 inst
.instruction
= THUMB_OP32 (opcode
);
8394 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8397 if (inst
.operands
[1].isreg
8398 && !inst
.operands
[1].writeback
8399 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
8400 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
8402 && inst
.size_req
!= 4)
8404 /* Insn may have a 16-bit form. */
8405 Rn
= inst
.operands
[1].reg
;
8406 if (inst
.operands
[1].immisreg
)
8408 inst
.instruction
= THUMB_OP16 (opcode
);
8410 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
8413 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
8414 && opcode
!= T_MNEM_ldrsb
)
8415 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
8416 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
8423 if (inst
.reloc
.pc_rel
)
8424 opcode
= T_MNEM_ldr_pc2
;
8426 opcode
= T_MNEM_ldr_pc
;
8430 if (opcode
== T_MNEM_ldr
)
8431 opcode
= T_MNEM_ldr_sp
;
8433 opcode
= T_MNEM_str_sp
;
8435 inst
.instruction
= inst
.operands
[0].reg
<< 8;
8439 inst
.instruction
= inst
.operands
[0].reg
;
8440 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8442 inst
.instruction
|= THUMB_OP16 (opcode
);
8443 if (inst
.size_req
== 2)
8444 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8446 inst
.relax
= opcode
;
8450 /* Definitely a 32-bit variant. */
8451 inst
.instruction
= THUMB_OP32 (opcode
);
8452 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8453 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8457 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8459 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
8461 /* Only [Rn,Rm] is acceptable. */
8462 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
8463 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
8464 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
8465 || inst
.operands
[1].negative
,
8466 _("Thumb does not support this addressing mode"));
8467 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8471 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8472 if (!inst
.operands
[1].isreg
)
8473 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8476 constraint (!inst
.operands
[1].preind
8477 || inst
.operands
[1].shifted
8478 || inst
.operands
[1].writeback
,
8479 _("Thumb does not support this addressing mode"));
8480 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
8482 constraint (inst
.instruction
& 0x0600,
8483 _("byte or halfword not valid for base register"));
8484 constraint (inst
.operands
[1].reg
== REG_PC
8485 && !(inst
.instruction
& THUMB_LOAD_BIT
),
8486 _("r15 based store not allowed"));
8487 constraint (inst
.operands
[1].immisreg
,
8488 _("invalid base register for register offset"));
8490 if (inst
.operands
[1].reg
== REG_PC
)
8491 inst
.instruction
= T_OPCODE_LDR_PC
;
8492 else if (inst
.instruction
& THUMB_LOAD_BIT
)
8493 inst
.instruction
= T_OPCODE_LDR_SP
;
8495 inst
.instruction
= T_OPCODE_STR_SP
;
8497 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8498 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8502 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
8503 if (!inst
.operands
[1].immisreg
)
8505 /* Immediate offset. */
8506 inst
.instruction
|= inst
.operands
[0].reg
;
8507 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8508 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8512 /* Register offset. */
8513 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
8514 constraint (inst
.operands
[1].negative
,
8515 _("Thumb does not support this addressing mode"));
8518 switch (inst
.instruction
)
8520 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
8521 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
8522 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
8523 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
8524 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
8525 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
8526 case 0x5600 /* ldrsb */:
8527 case 0x5e00 /* ldrsh */: break;
8531 inst
.instruction
|= inst
.operands
[0].reg
;
8532 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8533 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
8539 if (!inst
.operands
[1].present
)
8541 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8542 constraint (inst
.operands
[0].reg
== REG_LR
,
8543 _("r14 not allowed here"));
8545 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8546 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8547 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
8554 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8555 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
8561 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8562 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8563 inst
.instruction
|= inst
.operands
[2].reg
;
8564 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8570 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8571 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8572 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8573 inst
.instruction
|= inst
.operands
[3].reg
;
8581 int r0off
= (inst
.instruction
== T_MNEM_mov
8582 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
8583 unsigned long opcode
;
8585 bfd_boolean low_regs
;
8587 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
8588 opcode
= inst
.instruction
;
8589 if (current_it_mask
)
8590 narrow
= opcode
!= T_MNEM_movs
;
8592 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
8593 if (inst
.size_req
== 4
8594 || inst
.operands
[1].shifted
)
8597 if (!inst
.operands
[1].isreg
)
8599 /* Immediate operand. */
8600 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
8602 if (low_regs
&& narrow
)
8604 inst
.instruction
= THUMB_OP16 (opcode
);
8605 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8606 if (inst
.size_req
== 2)
8607 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8609 inst
.relax
= opcode
;
8613 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8614 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8615 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8616 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8621 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8622 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8623 encode_thumb32_shifted_operand (1);
8626 switch (inst
.instruction
)
8629 inst
.instruction
= T_OPCODE_MOV_HR
;
8630 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8631 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8632 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8636 /* We know we have low registers at this point.
8637 Generate ADD Rd, Rs, #0. */
8638 inst
.instruction
= T_OPCODE_ADD_I3
;
8639 inst
.instruction
|= inst
.operands
[0].reg
;
8640 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8646 inst
.instruction
= T_OPCODE_CMP_LR
;
8647 inst
.instruction
|= inst
.operands
[0].reg
;
8648 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8652 inst
.instruction
= T_OPCODE_CMP_HR
;
8653 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8654 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8655 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8662 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8663 if (inst
.operands
[1].isreg
)
8665 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
8667 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8668 since a MOV instruction produces unpredictable results. */
8669 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8670 inst
.instruction
= T_OPCODE_ADD_I3
;
8672 inst
.instruction
= T_OPCODE_CMP_LR
;
8674 inst
.instruction
|= inst
.operands
[0].reg
;
8675 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8679 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8680 inst
.instruction
= T_OPCODE_MOV_HR
;
8682 inst
.instruction
= T_OPCODE_CMP_HR
;
8688 constraint (inst
.operands
[0].reg
> 7,
8689 _("only lo regs allowed with immediate"));
8690 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8691 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8698 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8699 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf000) << 4;
8700 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0800) << 15;
8701 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0700) << 4;
8702 inst
.instruction
|= (inst
.operands
[1].imm
& 0x00ff);
8710 int r0off
= (inst
.instruction
== T_MNEM_mvn
8711 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
8714 if (inst
.size_req
== 4
8715 || inst
.instruction
> 0xffff
8716 || inst
.operands
[1].shifted
8717 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8719 else if (inst
.instruction
== T_MNEM_cmn
)
8721 else if (THUMB_SETS_FLAGS (inst
.instruction
))
8722 narrow
= (current_it_mask
== 0);
8724 narrow
= (current_it_mask
!= 0);
8726 if (!inst
.operands
[1].isreg
)
8728 /* For an immediate, we always generate a 32-bit opcode;
8729 section relaxation will shrink it later if possible. */
8730 if (inst
.instruction
< 0xffff)
8731 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8732 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8733 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8734 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8738 /* See if we can do this with a 16-bit instruction. */
8741 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8742 inst
.instruction
|= inst
.operands
[0].reg
;
8743 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8747 constraint (inst
.operands
[1].shifted
8748 && inst
.operands
[1].immisreg
,
8749 _("shift must be constant"));
8750 if (inst
.instruction
< 0xffff)
8751 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8752 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8753 encode_thumb32_shifted_operand (1);
8759 constraint (inst
.instruction
> 0xffff
8760 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
8761 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
8762 _("unshifted register required"));
8763 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8766 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8767 inst
.instruction
|= inst
.operands
[0].reg
;
8768 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8776 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
8779 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8780 _("selected processor does not support "
8781 "requested special purpose register"));
8785 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8786 _("selected processor does not support "
8787 "requested special purpose register %x"));
8788 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8789 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
8790 _("'CPSR' or 'SPSR' expected"));
8793 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8794 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8795 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
8803 constraint (!inst
.operands
[1].isreg
,
8804 _("Thumb encoding does not support an immediate here"));
8805 flags
= inst
.operands
[0].imm
;
8808 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8809 _("selected processor does not support "
8810 "requested special purpose register"));
8814 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8815 _("selected processor does not support "
8816 "requested special purpose register"));
8819 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8820 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
8821 inst
.instruction
|= (flags
& 0xff);
8822 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8828 if (!inst
.operands
[2].present
)
8829 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
8831 /* There is no 32-bit MULS and no 16-bit MUL. */
8832 if (unified_syntax
&& inst
.instruction
== T_MNEM_mul
)
8834 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8835 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8836 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8837 inst
.instruction
|= inst
.operands
[2].reg
<< 0;
8841 constraint (!unified_syntax
8842 && inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
8843 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8846 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8847 inst
.instruction
|= inst
.operands
[0].reg
;
8849 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8850 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
8851 else if (inst
.operands
[0].reg
== inst
.operands
[2].reg
)
8852 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8854 constraint (1, _("dest must overlap one source register"));
8861 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8862 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8863 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8864 inst
.instruction
|= inst
.operands
[3].reg
;
8866 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8867 as_tsktsk (_("rdhi and rdlo must be different"));
8875 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
8877 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8878 inst
.instruction
|= inst
.operands
[0].imm
;
8882 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8883 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
8888 constraint (inst
.operands
[0].present
,
8889 _("Thumb does not support NOP with hints"));
8890 inst
.instruction
= 0x46c0;
8901 if (THUMB_SETS_FLAGS (inst
.instruction
))
8902 narrow
= (current_it_mask
== 0);
8904 narrow
= (current_it_mask
!= 0);
8905 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8907 if (inst
.size_req
== 4)
8912 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8913 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8914 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8918 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8919 inst
.instruction
|= inst
.operands
[0].reg
;
8920 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8925 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8927 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8929 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8930 inst
.instruction
|= inst
.operands
[0].reg
;
8931 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8938 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8939 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8940 inst
.instruction
|= inst
.operands
[2].reg
;
8941 if (inst
.operands
[3].present
)
8943 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
8944 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8945 _("expression too complex"));
8946 inst
.instruction
|= (val
& 0x1c) << 10;
8947 inst
.instruction
|= (val
& 0x03) << 6;
8954 if (!inst
.operands
[3].present
)
8955 inst
.instruction
&= ~0x00000020;
8962 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8966 do_t_push_pop (void)
8970 constraint (inst
.operands
[0].writeback
,
8971 _("push/pop do not support {reglist}^"));
8972 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8973 _("expression too complex"));
8975 mask
= inst
.operands
[0].imm
;
8976 if ((mask
& ~0xff) == 0)
8977 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8978 else if ((inst
.instruction
== T_MNEM_push
8979 && (mask
& ~0xff) == 1 << REG_LR
)
8980 || (inst
.instruction
== T_MNEM_pop
8981 && (mask
& ~0xff) == 1 << REG_PC
))
8983 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8984 inst
.instruction
|= THUMB_PP_PC_LR
;
8987 else if (unified_syntax
)
8989 if (mask
& (1 << 13))
8990 inst
.error
= _("SP not allowed in register list");
8991 if (inst
.instruction
== T_MNEM_push
)
8993 if (mask
& (1 << 15))
8994 inst
.error
= _("PC not allowed in register list");
8998 if (mask
& (1 << 14)
8999 && mask
& (1 << 15))
9000 inst
.error
= _("LR and PC should not both be in register list");
9002 if ((mask
& (mask
- 1)) == 0)
9004 /* Single register push/pop implemented as str/ldr. */
9005 if (inst
.instruction
== T_MNEM_push
)
9006 inst
.instruction
= 0xf84d0d04; /* str reg, [sp, #-4]! */
9008 inst
.instruction
= 0xf85d0b04; /* ldr reg, [sp], #4 */
9009 mask
= ffs(mask
) - 1;
9013 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9017 inst
.error
= _("invalid register list to push/pop instruction");
9021 inst
.instruction
|= mask
;
9027 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9028 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9034 if (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9035 && inst
.size_req
!= 4)
9037 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9038 inst
.instruction
|= inst
.operands
[0].reg
;
9039 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9041 else if (unified_syntax
)
9043 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9044 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9045 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9046 inst
.instruction
|= inst
.operands
[1].reg
;
9049 inst
.error
= BAD_HIREG
;
9057 Rd
= inst
.operands
[0].reg
;
9058 Rs
= (inst
.operands
[1].present
9059 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9060 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9062 inst
.instruction
|= Rd
<< 8;
9063 inst
.instruction
|= Rs
<< 16;
9064 if (!inst
.operands
[2].isreg
)
9066 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9067 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9070 encode_thumb32_shifted_operand (2);
9076 constraint (current_it_mask
, BAD_NOT_IT
);
9077 if (inst
.operands
[0].imm
)
9078 inst
.instruction
|= 0x8;
9084 if (!inst
.operands
[1].present
)
9085 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9092 switch (inst
.instruction
)
9095 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9097 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9099 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9101 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9105 if (THUMB_SETS_FLAGS (inst
.instruction
))
9106 narrow
= (current_it_mask
== 0);
9108 narrow
= (current_it_mask
!= 0);
9109 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9111 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9113 if (inst
.operands
[2].isreg
9114 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9115 || inst
.operands
[2].reg
> 7))
9117 if (inst
.size_req
== 4)
9122 if (inst
.operands
[2].isreg
)
9124 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9125 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9126 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9127 inst
.instruction
|= inst
.operands
[2].reg
;
9131 inst
.operands
[1].shifted
= 1;
9132 inst
.operands
[1].shift_kind
= shift_kind
;
9133 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9134 ? T_MNEM_movs
: T_MNEM_mov
);
9135 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9136 encode_thumb32_shifted_operand (1);
9137 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9138 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9143 if (inst
.operands
[2].isreg
)
9147 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9148 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9149 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9150 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9154 inst
.instruction
|= inst
.operands
[0].reg
;
9155 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9161 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9162 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9163 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9166 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9167 inst
.instruction
|= inst
.operands
[0].reg
;
9168 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9174 constraint (inst
.operands
[0].reg
> 7
9175 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9176 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9178 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9180 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9181 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9182 _("source1 and dest must be same register"));
9184 switch (inst
.instruction
)
9186 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9187 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9188 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9189 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9193 inst
.instruction
|= inst
.operands
[0].reg
;
9194 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9198 switch (inst
.instruction
)
9200 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9201 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9202 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9203 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
9206 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9207 inst
.instruction
|= inst
.operands
[0].reg
;
9208 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9216 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9217 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9218 inst
.instruction
|= inst
.operands
[2].reg
;
9224 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
9225 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9226 _("expression too complex"));
9227 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9228 inst
.instruction
|= (value
& 0xf000) >> 12;
9229 inst
.instruction
|= (value
& 0x0ff0);
9230 inst
.instruction
|= (value
& 0x000f) << 16;
9236 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9237 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9238 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9240 if (inst
.operands
[3].present
)
9242 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9243 _("expression too complex"));
9245 if (inst
.reloc
.exp
.X_add_number
!= 0)
9247 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9248 inst
.instruction
|= 0x00200000; /* sh bit */
9249 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9250 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9252 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9259 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9260 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9261 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9267 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9268 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9269 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9270 || inst
.operands
[2].negative
,
9273 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9274 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9275 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9276 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9282 if (!inst
.operands
[2].present
)
9283 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
9285 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9286 || inst
.operands
[0].reg
== inst
.operands
[2].reg
9287 || inst
.operands
[0].reg
== inst
.operands
[3].reg
9288 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
9291 inst
.instruction
|= inst
.operands
[0].reg
;
9292 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9293 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9294 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9300 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9301 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9302 inst
.instruction
|= inst
.operands
[2].reg
;
9303 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
9309 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
9310 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9311 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
9313 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9314 inst
.instruction
|= inst
.operands
[0].reg
;
9315 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9317 else if (unified_syntax
)
9319 if (inst
.instruction
<= 0xffff)
9320 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9321 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9322 inst
.instruction
|= inst
.operands
[1].reg
;
9323 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
9327 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
9328 _("Thumb encoding does not support rotation"));
9329 constraint (1, BAD_HIREG
);
9336 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9344 half
= (inst
.instruction
& 0x10) != 0;
9345 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
9346 constraint (inst
.operands
[0].immisreg
,
9347 _("instruction requires register index"));
9348 constraint (inst
.operands
[0].imm
== 15,
9349 _("PC is not a valid index register"));
9350 constraint (!half
&& inst
.operands
[0].shifted
,
9351 _("instruction does not allow shifted index"));
9352 inst
.instruction
|= (inst
.operands
[0].reg
<< 16) | inst
.operands
[0].imm
;
9358 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9359 inst
.instruction
|= inst
.operands
[1].imm
;
9360 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9362 if (inst
.operands
[3].present
)
9364 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9365 _("expression too complex"));
9366 if (inst
.reloc
.exp
.X_add_number
!= 0)
9368 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9369 inst
.instruction
|= 0x00200000; /* sh bit */
9371 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9372 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9374 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9381 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9382 inst
.instruction
|= inst
.operands
[1].imm
;
9383 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9386 /* Neon instruction encoder helpers. */
9388 /* Encodings for the different types for various Neon opcodes. */
9390 /* An "invalid" code for the following tables. */
9393 struct neon_tab_entry
9396 unsigned float_or_poly
;
9397 unsigned scalar_or_imm
;
9400 /* Map overloaded Neon opcodes to their respective encodings. */
9401 #define NEON_ENC_TAB \
9402 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9403 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9404 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9405 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9406 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9407 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9408 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9409 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9410 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9411 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9412 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9413 /* Register variants of the following two instructions are encoded as
9414 vcge / vcgt with the operands reversed. */ \
9415 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9416 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9417 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9418 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9419 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9420 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9421 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9422 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9423 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9424 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9425 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9426 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9427 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9428 X(vshl, 0x0000400, N_INV, 0x0800510), \
9429 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9430 X(vand, 0x0000110, N_INV, 0x0800030), \
9431 X(vbic, 0x0100110, N_INV, 0x0800030), \
9432 X(veor, 0x1000110, N_INV, N_INV), \
9433 X(vorn, 0x0300110, N_INV, 0x0800010), \
9434 X(vorr, 0x0200110, N_INV, 0x0800010), \
9435 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9436 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9437 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9438 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9439 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9440 X(vst1, 0x0000000, 0x0800000, N_INV), \
9441 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9442 X(vst2, 0x0000100, 0x0800100, N_INV), \
9443 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9444 X(vst3, 0x0000200, 0x0800200, N_INV), \
9445 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9446 X(vst4, 0x0000300, 0x0800300, N_INV), \
9447 X(vmovn, 0x1b20200, N_INV, N_INV), \
9448 X(vtrn, 0x1b20080, N_INV, N_INV), \
9449 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9450 X(vqmovun, 0x1b20240, N_INV, N_INV)
9454 #define X(OPC,I,F,S) N_MNEM_##OPC
9459 static const struct neon_tab_entry neon_enc_tab
[] =
9461 #define X(OPC,I,F,S) { (I), (F), (S) }
9466 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9467 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9468 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9469 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9470 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9471 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9472 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9473 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9474 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9476 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9477 shapes which an instruction can accept. The following mnemonic characters
9478 are used in the tag names for this enumeration:
9480 D - Neon D<n> register
9481 Q - Neon Q<n> register
9485 L - D<n> register list
9526 /* Bit masks used in type checking given instructions.
9527 'N_EQK' means the type must be the same as (or based on in some way) the key
9528 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9529 set, various other bits can be set as well in order to modify the meaning of
9530 the type constraint. */
9553 N_KEY
= 0x080000, /* key element (main type specifier). */
9554 N_EQK
= 0x100000, /* given operand has the same type & size as the key. */
9555 N_DBL
= 0x000001, /* if N_EQK, this operand is twice the size. */
9556 N_HLF
= 0x000002, /* if N_EQK, this operand is half the size. */
9557 N_SGN
= 0x000004, /* if N_EQK, this operand is forced to be signed. */
9558 N_UNS
= 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9559 N_INT
= 0x000010, /* if N_EQK, this operand is forced to be integer. */
9560 N_FLT
= 0x000020, /* if N_EQK, this operand is forced to be float. */
9561 N_SIZ
= 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9563 N_MAX_NONSPECIAL
= N_F32
9566 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9568 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9569 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9570 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9571 #define N_SUF_32 (N_SU_32 | N_F32)
9572 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9573 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9575 /* Pass this as the first type argument to neon_check_type to ignore types
9577 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9579 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9580 specific shape when there are two alternatives. For non-polymorphic shapes,
9581 checking is done during operand parsing, so is not implemented here. */
9583 static enum neon_shape
9584 neon_check_shape (enum neon_shape req
)
9586 #define RR(X) (inst.operands[(X)].isreg)
9587 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9588 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9589 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9590 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9592 /* Fix missing optional operands. FIXME: we don't know at this point how
9593 many arguments we should have, so this makes the assumption that we have
9594 > 1. This is true of all current Neon opcodes, I think, but may not be
9595 true in the future. */
9596 if (!inst
.operands
[1].present
)
9597 inst
.operands
[1] = inst
.operands
[0];
9603 if (RD(0) && RD(1) && RD(2))
9605 else if (RQ(0) && RQ(1) && RQ(2))
9608 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9615 if (RD(0) && RD(1) && IM(2))
9617 else if (RQ(0) && RQ(1) && IM(2))
9620 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9627 if (RD(0) && RD(1) && RD(2) && IM(3))
9629 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9632 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9633 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9639 if (RD(0) && RD(1) && SC(2))
9641 else if (RQ(0) && RQ(1) && SC(2))
9644 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9653 else if (RQ(0) && RQ(1))
9656 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9664 else if (RQ(0) && SC(1))
9667 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9675 else if (RQ(0) && RR(1))
9678 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9686 else if (RQ(0) && IM(1))
9689 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9706 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
9709 /* Allow modification to be made to types which are constrained to be
9710 based on the key element, based on bits set alongside N_EQK. */
9711 if ((typebits
& N_EQK
) != 0)
9713 if ((typebits
& N_HLF
) != 0)
9715 else if ((typebits
& N_DBL
) != 0)
9717 if ((typebits
& N_SGN
) != 0)
9718 *g_type
= NT_signed
;
9719 else if ((typebits
& N_UNS
) != 0)
9720 *g_type
= NT_unsigned
;
9721 else if ((typebits
& N_INT
) != 0)
9722 *g_type
= NT_integer
;
9723 else if ((typebits
& N_FLT
) != 0)
9725 else if ((typebits
& N_SIZ
) != 0)
9726 *g_type
= NT_untyped
;
9730 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9731 operand type, i.e. the single type specified in a Neon instruction when it
9732 is the only one given. */
9734 static struct neon_type_el
9735 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
9737 struct neon_type_el dest
= *key
;
9739 assert ((thisarg
& N_EQK
) != 0);
9741 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
9746 /* Convert Neon type and size into compact bitmask representation. */
9748 static enum neon_type_mask
9749 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
9757 case 16: return N_16
;
9758 case 32: return N_32
;
9759 case 64: return N_64
;
9767 case 8: return N_I8
;
9768 case 16: return N_I16
;
9769 case 32: return N_I32
;
9770 case 64: return N_I64
;
9783 case 8: return N_P8
;
9784 case 16: return N_P16
;
9792 case 8: return N_S8
;
9793 case 16: return N_S16
;
9794 case 32: return N_S32
;
9795 case 64: return N_S64
;
9803 case 8: return N_U8
;
9804 case 16: return N_U16
;
9805 case 32: return N_U32
;
9806 case 64: return N_U64
;
9817 /* Convert compact Neon bitmask type representation to a type and size. Only
9818 handles the case where a single bit is set in the mask. */
9821 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
9822 enum neon_type_mask mask
)
9824 if ((mask
& N_EQK
) != 0)
9827 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
9829 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
9831 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
9833 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
)) != 0)
9838 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
9840 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
9841 *type
= NT_unsigned
;
9842 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
9844 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
9846 else if ((mask
& (N_P8
| N_P16
)) != 0)
9848 else if ((mask
& N_F32
) != 0)
9856 /* Modify a bitmask of allowed types. This is only needed for type
9860 modify_types_allowed (unsigned allowed
, unsigned mods
)
9863 enum neon_el_type type
;
9869 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
9871 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
9873 neon_modify_type_size (mods
, &type
, &size
);
9874 destmask
|= type_chk_of_el_type (type
, size
);
9881 /* Check type and return type classification.
9882 The manual states (paraphrase): If one datatype is given, it indicates the
9884 - the second operand, if there is one
9885 - the operand, if there is no second operand
9886 - the result, if there are no operands.
9887 This isn't quite good enough though, so we use a concept of a "key" datatype
9888 which is set on a per-instruction basis, which is the one which matters when
9889 only one data type is written.
9890 Note: this function has side-effects (e.g. filling in missing operands). All
9891 Neon instructions should call it before performing bit encoding.
9894 static struct neon_type_el
9895 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
9898 unsigned i
, pass
, key_el
= 0;
9899 unsigned types
[NEON_MAX_TYPE_ELS
];
9900 enum neon_el_type k_type
= NT_invtype
;
9901 unsigned k_size
= -1u;
9902 struct neon_type_el badtype
= {NT_invtype
, -1};
9903 unsigned key_allowed
= 0;
9905 /* Optional registers in Neon instructions are always (not) in operand 1.
9906 Fill in the missing operand here, if it was omitted. */
9907 if (els
> 1 && !inst
.operands
[1].present
)
9908 inst
.operands
[1] = inst
.operands
[0];
9910 /* Suck up all the varargs. */
9912 for (i
= 0; i
< els
; i
++)
9914 unsigned thisarg
= va_arg (ap
, unsigned);
9915 if (thisarg
== N_IGNORE_TYPE
)
9921 if ((thisarg
& N_KEY
) != 0)
9926 if (inst
.vectype
.elems
> 0)
9927 for (i
= 0; i
< els
; i
++)
9928 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
9930 first_error (_("types specified in both the mnemonic and operands"));
9934 /* Duplicate inst.vectype elements here as necessary.
9935 FIXME: No idea if this is exactly the same as the ARM assembler,
9936 particularly when an insn takes one register and one non-register
9938 if (inst
.vectype
.elems
== 1 && els
> 1)
9941 inst
.vectype
.elems
= els
;
9942 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
9943 for (j
= 0; j
< els
; j
++)
9945 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
9948 else if (inst
.vectype
.elems
== 0 && els
> 0)
9951 /* No types were given after the mnemonic, so look for types specified
9952 after each operand. We allow some flexibility here; as long as the
9953 "key" operand has a type, we can infer the others. */
9954 for (j
= 0; j
< els
; j
++)
9955 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
9956 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
9958 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
9960 for (j
= 0; j
< els
; j
++)
9961 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
9962 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
9967 first_error (_("operand types can't be inferred"));
9971 else if (inst
.vectype
.elems
!= els
)
9973 first_error (_("type specifier has the wrong number of parts"));
9977 for (pass
= 0; pass
< 2; pass
++)
9979 for (i
= 0; i
< els
; i
++)
9981 unsigned thisarg
= types
[i
];
9982 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
9983 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
9984 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
9985 unsigned g_size
= inst
.vectype
.el
[i
].size
;
9987 /* Decay more-specific signed & unsigned types to sign-insensitive
9988 integer types if sign-specific variants are unavailable. */
9989 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
9990 && (types_allowed
& N_SU_ALL
) == 0)
9991 g_type
= NT_integer
;
9993 /* If only untyped args are allowed, decay any more specific types to
9994 them. Some instructions only care about signs for some element
9995 sizes, so handle that properly. */
9996 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
9997 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
9998 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
9999 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
10000 g_type
= NT_untyped
;
10004 if ((thisarg
& N_KEY
) != 0)
10008 key_allowed
= thisarg
& ~N_KEY
;
10013 if ((thisarg
& N_EQK
) == 0)
10015 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10017 if ((given_type
& types_allowed
) == 0)
10019 first_error (_("bad type in Neon instruction"));
10025 enum neon_el_type mod_k_type
= k_type
;
10026 unsigned mod_k_size
= k_size
;
10027 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10028 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10030 first_error (_("inconsistent types in Neon instruction"));
10038 return inst
.vectype
.el
[key_el
];
10041 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10042 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10045 neon_dp_fixup (unsigned i
)
10049 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively).  Relies on SIZE being a power of two.  */

static unsigned
neon_logbits (unsigned size)
{
  /* ffs returns the 1-based position of the lowest set bit, so 8 -> 4,
     16 -> 5, etc.; subtracting 4 yields the encoding used in size fields.  */
  return ffs (size) - 4;
}
10072 #define LOW4(R) ((R) & 0xf)
10073 #define HI1(R) (((R) >> 4) & 1)
10075 /* Encode insns with bit pattern:
10077 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10078 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10080 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10081 different meaning for some instruction. */
10084 neon_three_same (int isquad
, int ubit
, int size
)
10086 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10087 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10088 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10089 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10090 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
10091 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
10092 inst
.instruction
|= (isquad
!= 0) << 6;
10093 inst
.instruction
|= (ubit
!= 0) << 24;
10095 inst
.instruction
|= neon_logbits (size
) << 20;
10097 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10100 /* Encode instructions of the form:
10102 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10103 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10105 Don't write size if SIZE == -1. */
10108 neon_two_same (int qbit
, int ubit
, int size
)
10110 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10111 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10112 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10113 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10114 inst
.instruction
|= (qbit
!= 0) << 6;
10115 inst
.instruction
|= (ubit
!= 0) << 24;
10118 inst
.instruction
|= neon_logbits (size
) << 18;
10120 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10123 /* Neon instruction encoders, in approximate order of appearance. */
10126 do_neon_dyadic_i_su (void)
10128 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10129 struct neon_type_el et
= neon_check_type (3, rs
,
10130 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
10131 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10135 do_neon_dyadic_i64_su (void)
10137 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10138 struct neon_type_el et
= neon_check_type (3, rs
,
10139 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
10140 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10144 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
10147 unsigned size
= et
.size
>> 3;
10148 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10149 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10150 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10151 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10152 inst
.instruction
|= (isquad
!= 0) << 6;
10153 inst
.instruction
|= immbits
<< 16;
10154 inst
.instruction
|= (size
>> 3) << 7;
10155 inst
.instruction
|= (size
& 0x7) << 19;
10157 inst
.instruction
|= (uval
!= 0) << 24;
10159 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10163 do_neon_shl_imm (void)
10165 if (!inst
.operands
[2].isreg
)
10167 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10168 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
10169 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10170 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, inst
.operands
[2].imm
);
10174 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10175 struct neon_type_el et
= neon_check_type (3, rs
,
10176 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10177 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10178 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10183 do_neon_qshl_imm (void)
10185 if (!inst
.operands
[2].isreg
)
10187 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10188 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
10189 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10190 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, rs
== NS_QQI
, et
,
10191 inst
.operands
[2].imm
);
10195 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10196 struct neon_type_el et
= neon_check_type (3, rs
,
10197 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10198 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10199 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10204 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
10206 /* Handle .I8 and .I64 as pseudo-instructions. */
10210 /* Unfortunately, this will make everything apart from zero out-of-range.
10211 FIXME is this the intended semantics? There doesn't seem much point in
10212 accepting .I8 if so. */
10213 immediate
|= immediate
<< 8;
10217 /* Similarly, anything other than zero will be replicated in bits [63:32],
10218 which probably isn't want we want if we specified .I64. */
10219 if (immediate
!= 0)
10220 goto bad_immediate
;
10226 if (immediate
== (immediate
& 0x000000ff))
10228 *immbits
= immediate
;
10229 return (size
== 16) ? 0x9 : 0x1;
10231 else if (immediate
== (immediate
& 0x0000ff00))
10233 *immbits
= immediate
>> 8;
10234 return (size
== 16) ? 0xb : 0x3;
10236 else if (immediate
== (immediate
& 0x00ff0000))
10238 *immbits
= immediate
>> 16;
10241 else if (immediate
== (immediate
& 0xff000000))
10243 *immbits
= immediate
>> 24;
10248 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each byte of IMM must be either all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
        return 0;
    }

  return 1;
}
/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned abcd = 0;
  int byte;

  /* Gather bit 0 of each byte of IMM into a contiguous 4-bit field.  */
  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 1) << byte;

  return abcd;
}
/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned high_bit = (imm >> 24) & 0x80;  /* Bit 31 -> bit 7 ("a").  */
  unsigned low_bits = (imm >> 19) & 0x7f;  /* Bits 25..19 -> "bcdefgh".  */

  return high_bit | low_bits;
}
10281 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10282 the instruction. *OP is passed as the initial value of the op field, and
10283 may be set to a different value depending on the constant (i.e.
10284 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10288 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, unsigned *immbits
,
10289 int *op
, int size
, enum neon_el_type type
)
10291 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
10293 if (size
!= 32 || *op
== 1)
10295 *immbits
= neon_qfloat_bits (immlo
);
10298 else if (size
== 64 && neon_bits_same_in_bytes (immhi
)
10299 && neon_bits_same_in_bytes (immlo
))
10301 /* Check this one first so we don't have to bother with immhi in later
10305 *immbits
= (neon_squash_bits (immhi
) << 4) | neon_squash_bits (immlo
);
10309 else if (immhi
!= 0)
10311 else if (immlo
== (immlo
& 0x000000ff))
10313 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10315 if ((size
!= 8 && size
!= 16 && size
!= 32)
10316 || (size
== 8 && *op
== 1))
10319 return (size
== 8) ? 0xe : (size
== 16) ? 0x8 : 0x0;
10321 else if (immlo
== (immlo
& 0x0000ff00))
10323 if (size
!= 16 && size
!= 32)
10325 *immbits
= immlo
>> 8;
10326 return (size
== 16) ? 0xa : 0x2;
10328 else if (immlo
== (immlo
& 0x00ff0000))
10332 *immbits
= immlo
>> 16;
10335 else if (immlo
== (immlo
& 0xff000000))
10339 *immbits
= immlo
>> 24;
10342 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
10346 *immbits
= (immlo
>> 8) & 0xff;
10349 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
10353 *immbits
= (immlo
>> 16) & 0xff;
10360 /* Write immediate bits [7:0] to the following locations:
10362 |28/24|23 19|18 16|15 4|3 0|
10363 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10365 This function is used by VMOV/VMVN/VORR/VBIC. */
10368 neon_write_immbits (unsigned immbits
)
10370 inst
.instruction
|= immbits
& 0xf;
10371 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
10372 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
10375 /* Invert low-order SIZE bits of XHI:XLO. */
10378 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
10380 unsigned immlo
= xlo
? *xlo
: 0;
10381 unsigned immhi
= xhi
? *xhi
: 0;
10386 immlo
= (~immlo
) & 0xff;
10390 immlo
= (~immlo
) & 0xffff;
10394 immhi
= (~immhi
) & 0xffffffff;
10395 /* fall through. */
10398 immlo
= (~immlo
) & 0xffffffff;
10413 do_neon_logic (void)
10415 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
10417 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10418 neon_check_type (3, rs
, N_IGNORE_TYPE
);
10419 /* U bit and size field were set as part of the bitmask. */
10420 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10421 neon_three_same (rs
== NS_QQQ
, 0, -1);
10425 enum neon_shape rs
= neon_check_shape (NS_DI_QI
);
10426 struct neon_type_el et
= neon_check_type (1, rs
, N_I8
| N_I16
| N_I32
10428 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
10432 if (et
.type
== NT_invtype
)
10435 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10440 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
10445 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
10450 /* Pseudo-instruction for VBIC. */
10451 immbits
= inst
.operands
[1].imm
;
10452 neon_invert_size (&immbits
, 0, et
.size
);
10453 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
10457 /* Pseudo-instruction for VORR. */
10458 immbits
= inst
.operands
[1].imm
;
10459 neon_invert_size (&immbits
, 0, et
.size
);
10460 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
10470 inst
.instruction
|= (rs
== NS_QI
) << 6;
10471 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10472 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10473 inst
.instruction
|= cmode
<< 8;
10474 neon_write_immbits (immbits
);
10476 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10481 do_neon_bitfield (void)
10483 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10484 neon_check_type (3, rs
, N_IGNORE_TYPE
);
10485 neon_three_same (rs
== NS_QQQ
, 0, -1);
10489 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
10492 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10493 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
10495 if (et
.type
== NT_float
)
10497 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
10498 neon_three_same (rs
== NS_QQQ
, 0, -1);
10502 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10503 neon_three_same (rs
== NS_QQQ
, et
.type
== ubit_meaning
, et
.size
);
10508 do_neon_dyadic_if_su (void)
10510 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10514 do_neon_dyadic_if_su_d (void)
10516 /* This version only allow D registers, but that constraint is enforced during
10517 operand parsing so we don't need to do anything extra here. */
10518 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10522 do_neon_dyadic_if_i (void)
10524 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10528 do_neon_dyadic_if_i_d (void)
10530 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10534 do_neon_addsub_if_i (void)
10536 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10537 affected if we specify unsigned args. */
10538 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
10541 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10543 V<op> A,B (A is operand 0, B is operand 2)
10548 so handle that case specially. */
10551 neon_exchange_operands (void)
10553 void *scratch
= alloca (sizeof (inst
.operands
[0]));
10554 if (inst
.operands
[1].present
)
10556 /* Swap operands[1] and operands[2]. */
10557 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
10558 inst
.operands
[1] = inst
.operands
[2];
10559 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
10563 inst
.operands
[1] = inst
.operands
[2];
10564 inst
.operands
[2] = inst
.operands
[0];
10569 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
10571 if (inst
.operands
[2].isreg
)
10574 neon_exchange_operands ();
10575 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
10579 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10580 struct neon_type_el et
= neon_check_type (2, rs
,
10581 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
10583 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10584 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10585 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10586 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10587 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10588 inst
.instruction
|= (rs
== NS_QQI
) << 6;
10589 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10590 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10592 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10599 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
10603 do_neon_cmp_inv (void)
10605 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
10611 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
10614 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10615 scalars, which are encoded in 5 bits, M : Rm.
10616 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10617 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10621 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
10623 unsigned regno
= NEON_SCALAR_REG (scalar
);
10624 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
10629 if (regno
> 7 || elno
> 3)
10631 return regno
| (elno
<< 3);
10634 if (regno
> 15 || elno
> 1)
10636 return regno
| (elno
<< 4);
10640 first_error (_("scalar out of range for multiply instruction"));
10646 /* Encode multiply / multiply-accumulate scalar instructions. */
10649 neon_mul_mac (struct neon_type_el et
, int ubit
)
10653 /* Give a more helpful error message if we have an invalid type. */
10654 if (et
.type
== NT_invtype
)
10657 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
10658 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10659 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10660 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10661 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10662 inst
.instruction
|= LOW4 (scalar
);
10663 inst
.instruction
|= HI1 (scalar
) << 5;
10664 inst
.instruction
|= (et
.type
== NT_float
) << 8;
10665 inst
.instruction
|= neon_logbits (et
.size
) << 20;
10666 inst
.instruction
|= (ubit
!= 0) << 24;
10668 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10672 do_neon_mac_maybe_scalar (void)
10674 if (inst
.operands
[2].isscalar
)
10676 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10677 struct neon_type_el et
= neon_check_type (3, rs
,
10678 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
10679 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10680 neon_mul_mac (et
, rs
== NS_QQS
);
10683 do_neon_dyadic_if_i ();
10689 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10690 struct neon_type_el et
= neon_check_type (3, rs
,
10691 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
10692 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10695 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10696 same types as the MAC equivalents. The polynomial type for this instruction
10697 is encoded the same as the integer type. */
10702 if (inst
.operands
[2].isscalar
)
10703 do_neon_mac_maybe_scalar ();
10705 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
10709 do_neon_qdmulh (void)
10711 if (inst
.operands
[2].isscalar
)
10713 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10714 struct neon_type_el et
= neon_check_type (3, rs
,
10715 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10716 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10717 neon_mul_mac (et
, rs
== NS_QQS
);
10721 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10722 struct neon_type_el et
= neon_check_type (3, rs
,
10723 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10724 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10725 /* The U bit (rounding) comes from bit mask. */
10726 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10731 do_neon_fcmp_absolute (void)
10733 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10734 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10735 /* Size field comes from bit mask. */
10736 neon_three_same (rs
== NS_QQQ
, 1, -1);
/* Inverted absolute float comparison: exchange the sources, then encode as
   the plain form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
10747 do_neon_step (void)
10749 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10750 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10751 neon_three_same (rs
== NS_QQQ
, 0, -1);
10755 do_neon_abs_neg (void)
10757 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
10758 struct neon_type_el et
= neon_check_type (3, rs
,
10759 N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
10760 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10761 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10762 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10763 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10764 inst
.instruction
|= (rs
== NS_QQ
) << 6;
10765 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10766 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10768 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10774 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10775 struct neon_type_el et
= neon_check_type (2, rs
,
10776 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10777 int imm
= inst
.operands
[2].imm
;
10778 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10779 _("immediate out of range for insert"));
10780 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10786 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10787 struct neon_type_el et
= neon_check_type (2, rs
,
10788 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10789 int imm
= inst
.operands
[2].imm
;
10790 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10791 _("immediate out of range for insert"));
10792 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, et
.size
- imm
);
10796 do_neon_qshlu_imm (void)
10798 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10799 struct neon_type_el et
= neon_check_type (2, rs
,
10800 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
10801 int imm
= inst
.operands
[2].imm
;
10802 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10803 _("immediate out of range for shift"));
10804 /* Only encodes the 'U present' variant of the instruction.
10805 In this case, signed types have OP (bit 8) set to 0.
10806 Unsigned types have OP set to 1. */
10807 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
10808 /* The rest of the bits are the same as other immediate shifts. */
10809 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10813 do_neon_qmovn (void)
10815 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10816 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10817 /* Saturating move where operands can be signed or unsigned, and the
10818 destination has the same signedness. */
10819 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10820 if (et
.type
== NT_unsigned
)
10821 inst
.instruction
|= 0xc0;
10823 inst
.instruction
|= 0x80;
10824 neon_two_same (0, 1, et
.size
/ 2);
10828 do_neon_qmovun (void)
10830 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10831 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10832 /* Saturating move with unsigned results. Operands must be signed. */
10833 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10834 neon_two_same (0, 1, et
.size
/ 2);
10838 do_neon_rshift_sat_narrow (void)
10840 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10841 or unsigned. If operands are unsigned, results must also be unsigned. */
10842 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10843 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10844 int imm
= inst
.operands
[2].imm
;
10845 /* This gets the bounds check, size encoding and immediate bits calculation
10849 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10850 VQMOVN.I<size> <Dd>, <Qm>. */
10853 inst
.operands
[2].present
= 0;
10854 inst
.instruction
= N_MNEM_vqmovn
;
10859 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10860 _("immediate out of range"));
10861 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
10865 do_neon_rshift_sat_narrow_u (void)
10867 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10868 or unsigned. If operands are unsigned, results must also be unsigned. */
10869 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10870 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10871 int imm
= inst
.operands
[2].imm
;
10872 /* This gets the bounds check, size encoding and immediate bits calculation
10876 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10877 VQMOVUN.I<size> <Dd>, <Qm>. */
10880 inst
.operands
[2].present
= 0;
10881 inst
.instruction
= N_MNEM_vqmovun
;
10886 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10887 _("immediate out of range"));
10888 /* FIXME: The manual is kind of unclear about what value U should have in
10889 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10891 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
10895 do_neon_movn (void)
10897 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10898 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10899 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10900 neon_two_same (0, 1, et
.size
/ 2);
10904 do_neon_rshift_narrow (void)
10906 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10907 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10908 int imm
= inst
.operands
[2].imm
;
10909 /* This gets the bounds check, size encoding and immediate bits calculation
10913 /* If immediate is zero then we are a pseudo-instruction for
10914 VMOVN.I<size> <Dd>, <Qm> */
10917 inst
.operands
[2].present
= 0;
10918 inst
.instruction
= N_MNEM_vmovn
;
10923 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10924 _("immediate out of range for narrowing operation"));
10925 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
/* Encode VSHLL (shift left long).  The maximum-shift form (imm == size)
   is a distinct encoding, handled first; other shift amounts use the
   immediate-shift encoding after a stricter type check.
   NOTE(review): braces/else lines elided in the extraction were
   reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
/* Check the various types for the VCVT instruction, and return the one that
   the current instruction is.  Returns the index (0..3) of the matching
   conversion flavour, or -1 if none matched.  The CVT_VAR macro probes one
   (dest, source) type pair and returns its index on a match, clearing any
   error left by failed probes.
   NOTE(review): the return paths inside CVT_VAR and the trailing return
   were elided in the extraction and reconstructed — verify upstream.  */
static int
neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y)                          \
  et = neon_check_type (2, rs, (X), (Y));       \
  if (et.type != NT_invtype)                    \
    {                                           \
      inst.error = NULL;                        \
      return (C);                               \
    }
  struct neon_type_el et;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  return -1;
#undef CVT_VAR
}
/* Encode VCVT.  A fixed-point conversion (third operand present and
   non-zero) uses the immediate encoding with fraction bits in [21:16];
   otherwise the plain integer<->float encoding is used.
   NOTE(review): the function header, the `if (flavour != -1)` guards and
   the if/else structure were elided in the extraction and reconstructed —
   verify against upstream tc-arm.c.  */
static void
do_neon_cvt (void)
{
  /* Fixed-point conversion with #0 immediate is encoded as an integer
     conversion.  */
  if (inst.operands[2].present && inst.operands[2].imm != 0)
    {
      enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
      int flavour = neon_cvt_flavour (rs);
      /* Fraction bits are encoded as 32 minus the immediate.  */
      unsigned immbits = 32 - inst.operands[2].imm;
      /* Encoding bits per conversion flavour (s32<->f32, u32<->f32).  */
      unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      if (flavour != -1)
        inst.instruction |= enctab[flavour];
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQI) << 6;
      inst.instruction |= 1 << 21;
      inst.instruction |= immbits << 16;
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DD_QQ);
      int flavour = neon_cvt_flavour (rs);
      unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      if (flavour != -1)
        inst.instruction |= enctab[flavour];
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQ) << 6;
      inst.instruction |= 2 << 18;
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode the immediate and cmode fields for VMOV/VMVN with an immediate
   operand.  If the immediate cannot be encoded directly, the bits are
   inverted and the instruction flipped between VMOV and VMVN before
   retrying; failure of both attempts is an error.
   NOTE(review): local declarations of `op`/`cmode`, the op inversion and
   brace/return lines were elided in the extraction and reconstructed —
   verify against upstream tc-arm.c.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_check_shape (NS_DI_QI);
  struct neon_type_el et = neon_check_type (1, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode;

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its high word in the .reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                            et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (rs == NS_QI) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
/* Encode VMVN.  With a register source this is the two-register integer
   form; with an immediate it defers to neon_move_immediate.
   NOTE(review): the function header and if/else braces were elided in the
   extraction and reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_check_shape (NS_DD_QQ);

      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= (rs == NS_QQ) << 6;
    }
  else
    {
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_move_immediate ();
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size |  Rn |  Rd |x x x x|N|x|M|x| Rm |

   Shared encoder for the mixed-length (long/wide/narrow) three-register
   operations: registers in the usual D/Rn/Rd/M/Rm fields, the U bit set
   for unsigned element types, and SIZE (in bits) encoded log2 in [21:20].  */
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode a lengthening dyadic operation (Q = D op D, e.g. VADDL/VSUBL).  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
/* Encode VABAL (absolute difference and accumulate, long).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
/* Shared encoder for long multiply-accumulate, which may take either a
   scalar (Dm[x]) or a register third operand.  REGTYPES constrains the
   element types for the scalar form, SCALARTYPES those for the register
   form (parameter names preserved from the original, despite appearing
   swapped relative to their use).
   NOTE(review): braces/else elided in extraction, reconstructed.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}
/* Encode VMLAL/VMLSL-style long MAC, optionally with a scalar operand.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
/* Encode a widening dyadic operation (Q = Q op D, e.g. VADDW/VSUBW).  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
/* Encode a narrowing dyadic operation (e.g. VADDHN/VSUBHN).  The size
   encoded is the narrowed element width, hence et.size / 2.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  neon_mixed_length (et, et.size / 2);
}
/* Encode VQDMULL-style saturating long multiply, maybe with a scalar.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
/* Encode VMULL.  A scalar third operand goes through the long-MAC path;
   otherwise the polynomial (P8) or integer encoding is chosen by type.
   NOTE(review): else/brace lines elided in the extraction, reconstructed.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
        inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
        inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
         zero. Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}
/* Encode VEXT (byte-wise extract from a register pair).  The element-wise
   immediate is converted to a byte offset for the imm4 field.
   NOTE(review): the function header was elided in the extraction and has
   been reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Scale the element index to a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (rs == NS_QQQI) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode VREV16/VREV32/VREV64.  The op field (bits [8:7] of the bitmask)
   identifies which variant; the elements being reversed must be narrower
   than the reversal region or the encoding would be reserved.
   NOTE(review): header elided in extraction, reconstructed.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VDUP: either duplicate a vector scalar (Dm[x]) to all lanes, or
   duplicate an ARM core register to all lanes of a vector.
   NOTE(review): the function header, switch scaffolding and the final
   thumb/ARM condition selection were elided in the extraction and have
   been reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_check_shape (NS_DS_QS);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index is encoded above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= (rs == NS_QS) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DR_QR);
      struct neon_type_el et = neon_check_type (1, rs,
        N_8 | N_16 | N_32 | N_KEY);
      unsigned save_cond = inst.instruction & 0xf0000000;
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= (rs == NS_QR) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      if (thumb_mode)
        inst.instruction |= 0xe0000000;
      else
        inst.instruction |= save_cond;
    }
}
/* VMOV has particularly many variations. It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)

   We should have just enough information to be able to disambiguate most of
   these, apart from "Two ARM registers to vector" and "Vector to two ARM
   registers" cases. For these, abuse the .regisimm operand field to signify
   which variant was parsed.

   All the encoded bits are hardcoded by this function.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).

   NOTE(review): the switch scaffolding, case labels and brace lines were
   elided in the extraction and have been reconstructed — verify against
   upstream tc-arm.c.  */
static void
do_neon_mov (void)
{
  int nargs = inst.operands[0].present + inst.operands[1].present
    + inst.operands[2].present;
  unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;

  switch (nargs)
    {
    case 2:
      /* Cases 0, 1, 2, 3, 4, 6.  */
      if (inst.operands[1].isscalar)
        {
          /* Case 6: scalar to ARM register.  */
          struct neon_type_el et = neon_check_type (2, NS_IGNORE,
            N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
          unsigned logsize = neon_logbits (et.size);
          unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
          unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
          unsigned abcdebits = 0;

          constraint (et.type == NT_invtype, _("bad type for scalar"));
          constraint (x >= 64 / et.size, _("scalar index out of range"));

          switch (et.size)
            {
            case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
            case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
            case 32: abcdebits = 0x00; break;
            default: break;
            }

          abcdebits |= x << logsize;
          inst.instruction = save_cond;
          inst.instruction |= 0xe100b10;
          inst.instruction |= LOW4 (dn) << 16;
          inst.instruction |= HI1 (dn) << 7;
          inst.instruction |= inst.operands[0].reg << 12;
          inst.instruction |= (abcdebits & 3) << 5;
          inst.instruction |= (abcdebits >> 2) << 21;
        }
      else if (inst.operands[1].isreg)
        {
          /* Cases 0, 1, 4.  */
          if (inst.operands[0].isscalar)
            {
              /* Case 4: ARM register to scalar.  */
              unsigned bcdebits = 0;
              struct neon_type_el et = neon_check_type (2, NS_IGNORE,
                N_8 | N_16 | N_32 | N_KEY, N_EQK);
              int logsize = neon_logbits (et.size);
              unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
              unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

              constraint (et.type == NT_invtype, _("bad type for scalar"));
              constraint (x >= 64 / et.size, _("scalar index out of range"));

              switch (et.size)
                {
                case 8:  bcdebits = 0x8; break;
                case 16: bcdebits = 0x1; break;
                case 32: bcdebits = 0x0; break;
                default: break;
                }

              bcdebits |= x << logsize;
              inst.instruction = save_cond;
              inst.instruction |= 0xe000b10;
              inst.instruction |= LOW4 (dn) << 16;
              inst.instruction |= HI1 (dn) << 7;
              inst.instruction |= inst.operands[1].reg << 12;
              inst.instruction |= (bcdebits & 3) << 5;
              inst.instruction |= (bcdebits >> 2) << 21;
            }
          else
            {
              /* Cases 0, 1: vector register to vector register.  */
              enum neon_shape rs = neon_check_shape (NS_DD_QQ);
              /* The architecture manual I have doesn't explicitly state which
                 value the U bit should have for register->register moves, but
                 the equivalent VORR instruction has U = 0, so do that.  */
              inst.instruction = 0x0200110;
              inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
              inst.instruction |= HI1 (inst.operands[0].reg) << 22;
              inst.instruction |= LOW4 (inst.operands[1].reg);
              inst.instruction |= HI1 (inst.operands[1].reg) << 5;
              /* VORR takes the source register in both Rn and Rm.  */
              inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
              inst.instruction |= HI1 (inst.operands[1].reg) << 7;
              inst.instruction |= (rs == NS_QQ) << 6;

              inst.instruction = neon_dp_fixup (inst.instruction);
            }
        }
      else
        {
          /* Cases 2, 3: immediate loads.  */
          inst.instruction = 0x0800010;
          neon_move_immediate ();
          inst.instruction = neon_dp_fixup (inst.instruction);
        }
      break;

    case 3:
      /* Cases 5, 7.  */
      if (inst.operands[0].regisimm)
        {
          /* Case 5: two ARM registers to vector.  */
          inst.instruction = save_cond;
          inst.instruction |= 0xc400b10;
          inst.instruction |= LOW4 (inst.operands[0].reg);
          inst.instruction |= HI1 (inst.operands[0].reg) << 5;
          inst.instruction |= inst.operands[1].reg << 12;
          inst.instruction |= inst.operands[2].reg << 16;
        }
      else
        {
          /* Case 7: vector to two ARM registers.  */
          inst.instruction = save_cond;
          inst.instruction |= 0xc500b10;
          inst.instruction |= inst.operands[0].reg << 12;
          inst.instruction |= inst.operands[1].reg << 16;
          inst.instruction |= LOW4 (inst.operands[2].reg);
          inst.instruction |= HI1 (inst.operands[2].reg) << 5;
        }
      break;

    default:
      abort ();
    }
}
/* Encode V{R}SHR with an immediate.  A zero shift is a pseudo-instruction
   encoded as VMOV.
   NOTE(review): the imm == 0 branch body (do_neon_mov call, return) was
   elided in the extraction and reconstructed — verify upstream.  */
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
                  et.size - imm);
}
/* Encode VMOVL (lengthening move).  Size goes in bits [21:19] as
   et.size / 8; element size itself is not re-encoded (-1).  */
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
/* Encode VTRN (transpose elements).
   NOTE(review): the function header was elided in the extraction and has
   been reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VZIP/VUZP.  The D-register 32-bit case has no distinct encoding
   and is emitted as VTRN.32 instead.
   NOTE(review): the do_neon_trn call/return inside the special case were
   elided in the extraction and reconstructed — verify upstream.  */
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VQABS/VQNEG (saturating absolute value / negate).  */
static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VPADDL/VPADAL (pairwise add long / and accumulate).  */
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VRECPE/VRSQRTE (reciprocal estimate).  The F bit (bit 8)
   distinguishes the float form from the unsigned-integer form.  */
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VCLS (count leading sign bits) — signed element types only.
   NOTE(review): function header elided in extraction, reconstructed.  */
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VCLZ (count leading zeros) — untyped integer element types.
   NOTE(review): function header elided in extraction, reconstructed.  */
static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VCNT (population count) — 8-bit elements only.
   NOTE(review): function header elided in extraction, reconstructed.  */
static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (rs == NS_QQ, 1, et.size);
}
/* Encode VSWP (swap vectors).  Size is ignored (-1).
   NOTE(review): function header elided in extraction; name inferred from
   position and the size-less neon_two_same call — verify upstream.  */
static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_check_shape (NS_DD_QQ);
  neon_two_same (rs == NS_QQ, 1, -1);
}
/* Encode VTBL/VTBX (table lookup).  The register-list length (1..4) is
   encoded minus one in bits [9:8].  */
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode VLDM/VSTM.  Each D register occupies two words in the offset
   field, hence imm * 2.
   NOTE(review): the final thumb_mode guard line was elided in the
   extraction and reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  /* Thumb encoding fixes the condition field to 0xE.  */
  if (thumb_mode)
    inst.instruction |= 0xe0000000;
}
/* Encode VLDR/VSTR.  The 8-bit immediate offset field counts words, with
   direction in the U bit (bit 23).  PC-relative loads get a coprocessor
   offset relocation resolved later.
   NOTE(review): several lines (offset_up declaration/initialization, the
   addressing-mode error string, if/else scaffolding) were elided in the
   extraction and reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_ldr_str (void)
{
  unsigned offsetbits;
  int offset_up = 1;
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;

  constraint (inst.reloc.pc_rel && !is_ldr,
              _("PC-relative addressing unavailable with VSTR"));

  constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
              _("Immediate value must be a constant"));

  if (inst.reloc.exp.X_add_number < 0)
    {
      offset_up = 0;
      offsetbits = -inst.reloc.exp.X_add_number / 4;
    }
  else
    offsetbits = inst.reloc.exp.X_add_number / 4;

  /* FIXME: Does this catch everything?  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted,
              BAD_ADDR_MODE);
  constraint ((inst.operands[1].imm & 3) != 0,
              _("Offset must be a multiple of 4"));
  constraint (offsetbits != (offsetbits & 0xff),
              _("Immediate offset out of range"));

  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= offsetbits & 0xff;
  inst.instruction |= offset_up << 23;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;

  if (inst.reloc.pc_rel)
    {
      if (thumb_mode)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
  else
    inst.reloc.type = BFD_RELOC_UNUSED;
}
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.
   Encodes the alignment bits and looks up the "type" field from a table
   keyed on register stride/list length (from the list specifier) and the
   <n> of VLD<n>/VST<n> (pre-seeded in bits [9:8] of the bitmask).
   NOTE(review): the switch cases for 128/256-bit alignment and the idx /
   typebits declarations were elided in the extraction and reconstructed —
   verify against upstream tc-arm.c.  */
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  int idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise. The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  Returns SUCCESS or FAIL (issuing the error).
   NOTE(review): the early-return for no alignment specifier, the do/while
   scaffolding and va_end were elided in the extraction and reconstructed —
   verify against upstream tc-arm.c.  */
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment was specified: nothing to check or encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
/* Encode single-lane VLD<n>/VST<n>.  Checks list length, lane index and
   stride, then encodes the per-<n> alignment bits, the stride bit and the
   lane number.
   NOTE(review): switch/case scaffolding, break statements and the
   `do_align` guards were elided in the extraction and reconstructed —
   verify against upstream tc-arm.c.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;   /* <n> of VLD<n>/VST<n>, minus 1.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0: /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: break;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1: /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: break;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default:
      break;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
/* Encode single n-element structure to all lanes VLD<n> instructions.
   Validates list length, alignment and register stride per <n>, then
   encodes size, stride and alignment bits.
   NOTE(review): switch/case scaffolding, break statements and the else of
   the VLD4 size-32/align-128 special case were elided in the extraction
   and reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements with 128-bit alignment use a special size code.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default:
      break;
    }

  inst.instruction |= do_align << 4;
}
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  Dispatches on the lane field of the list
   specifier: interleave, all-lanes (dup), or single-lane form; then encodes
   the destination list, base register and post-index/writeback behavior.
   NOTE(review): switch scaffolding, breaks and if/else lines were elided in
   the extraction and reconstructed — verify against upstream tc-arm.c.  */
static void
do_neon_ldx_stx (void)
{
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      /* Rm = 0xd and 0xf are reserved (writeback / no-writeback codes).  */
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.
   NOTE(review): the parameter list tail, switch scaffolding and fixS
   declaration were elided in the extraction and reconstructed — verify
   against upstream tc-arm.c.  */
static void
fix_new_arm (fragS *       frag,
             int           where,
             short int     size,
             expressionS * exp,
             int           pc_rel,
             int           reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Arbitrary expression: push it into a symbol and fix that.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
                         pc_rel, reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
/* Create a frag for an instruction requiring relaxation.  The instruction
   is emitted at its minimum (Thumb) size into a machine-dependent variant
   frag, to be widened later if the target turns out to be out of range.
   NOTE(review): local declarations, the OBJ_ELF guard and switch
   scaffolding were elided in the extraction and reconstructed — verify
   against upstream tc-arm.c.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

#ifdef OBJ_ELF
  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);
#endif

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
/* Write a 32-bit thumb instruction to buf.  Thumb-2 encodings are stored
   as two consecutive halfwords, most-significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
/* Emit the assembled instruction in `inst' to the current frag: report any
   accumulated error, divert to the relaxation path if required, write the
   bytes (Thumb-2 halfword order or one/two ARM words), and attach any
   pending relocation and debug info.
   NOTE(review): the error/relax/size guards and the OBJ_ELF conditional
   were elided in the extraction and reconstructed — verify against
   upstream tc-arm.c.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Two-word ARM instruction: the same word is written twice.  */
      assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

#ifdef OBJ_ELF
  dwarf2_emit_insn (inst.size);
#endif
}
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.	*/
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)	*/
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
12133 /* Subroutine of md_assemble, responsible for looking up the primary
12134 opcode from the mnemonic the user wrote. STR points to the
12135 beginning of the mnemonic.
12137 This is not simply a hash table lookup, because of conditional
12138 variants. Most instructions have conditional variants, which are
12139 expressed with a _conditional affix_ to the mnemonic. If we were
12140 to encode each conditional variant as a literal string in the opcode
12141 table, it would have approximately 20,000 entries.
12143 Most mnemonics take this affix as a suffix, and in unified syntax,
12144 'most' is upgraded to 'all'. However, in the divided syntax, some
12145 instructions take the affix as an infix, notably the s-variants of
12146 the arithmetic instructions. Of those instructions, all but six
12147 have the infix appear after the third character of the mnemonic.
12149 Accordingly, the algorithm for looking up primary opcodes given
12152 1. Look up the identifier in the opcode table.
12153 If we find a match, go to step U.
12155 2. Look up the last two characters of the identifier in the
12156 conditions table. If we find a match, look up the first N-2
12157 characters of the identifier in the opcode table. If we
12158 find a match, go to step CE.
12160 3. Look up the fourth and fifth characters of the identifier in
12161 the conditions table. If we find a match, extract those
12162 characters from the identifier, and look up the remaining
12163 characters in the opcode table. If we find a match, go
12168 U. Examine the tag field of the opcode structure, in case this is
12169 one of the six instructions with its conditional infix in an
12170 unusual place. If it is, the tag tells us where to find the
12171 infix; look it up in the conditions table and set inst.cond
12172 accordingly. Otherwise, this is an unconditional instruction.
12173 Again set inst.cond accordingly. Return the opcode structure.
12175 CE. Examine the tag field to make sure this is an instruction that
12176 should receive a conditional suffix. If it is not, fail.
12177 Otherwise, set inst.cond from the suffix we already looked up,
12178 and return the opcode structure.
12180 CM. Examine the tag field to make sure this is an instruction that
12181 should receive a conditional infix after the third character.
12182 If it is not, fail. Otherwise, undo the edits to the current
12183 line of input and proceed as for case CE. */
12185 static const struct asm_opcode
*
12186 opcode_lookup (char **str
)
12190 const struct asm_opcode
*opcode
;
12191 const struct asm_cond
*cond
;
12194 /* Scan up to the end of the mnemonic, which must end in white space,
12195 '.' (in unified mode only), or end of string. */
12196 for (base
= end
= *str
; *end
!= '\0'; end
++)
12197 if (*end
== ' ' || (unified_syntax
&& *end
== '.'))
12203 /* Handle a possible width suffix and/or Neon type suffix. */
12210 else if (end
[1] == 'n')
12215 inst
.vectype
.elems
= 0;
12217 *str
= end
+ offset
;
12219 if (end
[offset
] == '.')
12221 /* See if we have a Neon type suffix. */
12222 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
12225 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
12231 /* Look for unaffixed or special-case affixed mnemonic. */
12232 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
12236 if (opcode
->tag
< OT_odd_infix_0
)
12238 inst
.cond
= COND_ALWAYS
;
12242 if (unified_syntax
)
12243 as_warn (_("conditional infixes are deprecated in unified syntax"));
12244 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
12245 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12248 inst
.cond
= cond
->value
;
12252 /* Cannot have a conditional suffix on a mnemonic of less than two
12254 if (end
- base
< 3)
12257 /* Look for suffixed mnemonic. */
12259 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12260 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
12261 if (opcode
&& cond
)
12264 switch (opcode
->tag
)
12266 case OT_cinfix3_legacy
:
12267 /* Ignore conditional suffixes matched on infix only mnemonics. */
12271 case OT_odd_infix_unc
:
12272 if (!unified_syntax
)
12274 /* else fall through */
12277 case OT_csuf_or_in3
:
12278 inst
.cond
= cond
->value
;
12281 case OT_unconditional
:
12282 case OT_unconditionalF
:
12285 inst
.cond
= cond
->value
;
12289 /* delayed diagnostic */
12290 inst
.error
= BAD_COND
;
12291 inst
.cond
= COND_ALWAYS
;
12300 /* Cannot have a usual-position infix on a mnemonic of less than
12301 six characters (five would be a suffix). */
12302 if (end
- base
< 6)
12305 /* Look for infixed mnemonic in the usual position. */
12307 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12311 memcpy (save
, affix
, 2);
12312 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
12313 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
12314 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
12315 memcpy (affix
, save
, 2);
12317 if (opcode
&& (opcode
->tag
== OT_cinfix3
|| opcode
->tag
== OT_csuf_or_in3
12318 || opcode
->tag
== OT_cinfix3_legacy
))
12321 if (unified_syntax
&& opcode
->tag
== OT_cinfix3
)
12322 as_warn (_("conditional infixes are deprecated in unified syntax"));
12324 inst
.cond
= cond
->value
;
12332 md_assemble (char *str
)
12335 const struct asm_opcode
* opcode
;
12337 /* Align the previous label if needed. */
12338 if (last_label_seen
!= NULL
)
12340 symbol_set_frag (last_label_seen
, frag_now
);
12341 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
12342 S_SET_SEGMENT (last_label_seen
, now_seg
);
12345 memset (&inst
, '\0', sizeof (inst
));
12346 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12348 opcode
= opcode_lookup (&p
);
12351 /* It wasn't an instruction, but it might be a register alias of
12352 the form alias .req reg, or a Neon .dn/.qn directive. */
12353 if (!create_register_alias (str
, p
)
12354 && !create_neon_reg_alias (str
, p
))
12355 as_bad (_("bad instruction `%s'"), str
);
12362 arm_feature_set variant
;
12364 variant
= cpu_variant
;
12365 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12366 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
12367 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
12368 /* Check that this instruction is supported for this CPU. */
12369 if (!opcode
->tvariant
12370 || (thumb_mode
== 1
12371 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
12373 as_bad (_("selected processor does not support `%s'"), str
);
12376 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
12377 && opcode
->tencode
!= do_t_branch
)
12379 as_bad (_("Thumb does not support conditional execution"));
12383 /* Check conditional suffixes. */
12384 if (current_it_mask
)
12387 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
12388 current_it_mask
<<= 1;
12389 current_it_mask
&= 0x1f;
12390 /* The BKPT instruction is unconditional even in an IT block. */
12392 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
12394 as_bad (_("incorrect condition in IT block"));
12398 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
12400 as_bad (_("thumb conditional instrunction not in IT block"));
12404 mapping_state (MAP_THUMB
);
12405 inst
.instruction
= opcode
->tvalue
;
12407 if (!parse_operands (p
, opcode
->operands
))
12408 opcode
->tencode ();
12410 /* Clear current_it_mask at the end of an IT block. */
12411 if (current_it_mask
== 0x10)
12412 current_it_mask
= 0;
12414 if (!(inst
.error
|| inst
.relax
))
12416 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
12417 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
12418 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
12420 as_bad (_("cannot honor width suffix -- `%s'"), str
);
12424 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12425 *opcode
->tvariant
);
12426 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12427 set those bits when Thumb-2 32-bit instructions are seen. ie.
12428 anything other than bl/blx.
12429 This is overly pessimistic for relaxable instructions. */
12430 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
12432 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12437 /* Check that this instruction is supported for this CPU. */
12438 if (!opcode
->avariant
||
12439 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
12441 as_bad (_("selected processor does not support `%s'"), str
);
12446 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
12450 mapping_state (MAP_ARM
);
12451 inst
.instruction
= opcode
->avalue
;
12452 if (opcode
->tag
== OT_unconditionalF
)
12453 inst
.instruction
|= 0xF << 28;
12455 inst
.instruction
|= inst
.cond
<< 28;
12456 inst
.size
= INSN_SIZE
;
12457 if (!parse_operands (p
, opcode
->operands
))
12458 opcode
->aencode ();
12459 /* Arm mode bx is marked as both v4T and v5 because it's still required
12460 on a hypothetical non-thumb v5 core. */
12461 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
12462 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
12463 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
12465 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
12466 *opcode
->avariant
);
12471 /* Various frobbings of labels and their addresses. */
12474 arm_start_line_hook (void)
12476 last_label_seen
= NULL
;
12480 arm_frob_label (symbolS
* sym
)
12482 last_label_seen
= sym
;
12484 ARM_SET_THUMB (sym
, thumb_mode
);
12486 #if defined OBJ_COFF || defined OBJ_ELF
12487 ARM_SET_INTERWORK (sym
, support_interwork
);
12490 /* Note - do not allow local symbols (.Lxxx) to be labeled
12491 as Thumb functions. This is because these labels, whilst
12492 they exist inside Thumb code, are not the entry points for
12493 possible ARM->Thumb calls. Also, these labels can be used
12494 as part of a computed goto or switch statement. eg gcc
12495 can generate code that looks like this:
12497 ldr r2, [pc, .Laaa]
12507 The first instruction loads the address of the jump table.
12508 The second instruction converts a table index into a byte offset.
12509 The third instruction gets the jump address out of the table.
12510 The fourth instruction performs the jump.
12512 If the address stored at .Laaa is that of a symbol which has the
12513 Thumb_Func bit set, then the linker will arrange for this address
12514 to have the bottom bit set, which in turn would mean that the
12515 address computation performed by the third instruction would end
12516 up with the bottom bit set. Since the ARM is capable of unaligned
12517 word loads, the instruction would then load the incorrect address
12518 out of the jump table, and chaos would ensue. */
12519 if (label_is_thumb_function_name
12520 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
12521 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
12523 /* When the address of a Thumb function is taken the bottom
12524 bit of that address should be set. This will allow
12525 interworking between Arm and Thumb functions to work
12528 THUMB_SET_FUNC (sym
, 1);
12530 label_is_thumb_function_name
= FALSE
;
12534 dwarf2_emit_label (sym
);
12539 arm_data_in_code (void)
12541 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
12543 *input_line_pointer
= '/';
12544 input_line_pointer
+= 5;
12545 *input_line_pointer
= 0;
12553 arm_canonicalize_symbol_name (char * name
)
12557 if (thumb_mode
&& (len
= strlen (name
)) > 5
12558 && streq (name
+ len
- 5, "/data"))
12559 *(name
+ len
- 5) = 0;
12564 /* Table of all register names defined by default. The user can
12565 define additional names with .req. Note that all register names
12566 should appear in both upper and lowercase variants. Some registers
12567 also have mixed-case names. */
12569 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12570 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12571 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12572 #define REGSET(p,t) \
12573 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12574 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12575 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12576 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12577 #define REGSETH(p,t) \
12578 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12579 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12580 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12581 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12582 #define REGSET2(p,t) \
12583 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12584 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12585 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12586 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12588 static const struct reg_entry reg_names
[] =
12590 /* ARM integer registers. */
12591 REGSET(r
, RN
), REGSET(R
, RN
),
12593 /* ATPCS synonyms. */
12594 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
12595 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
12596 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
12598 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
12599 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
12600 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
12602 /* Well-known aliases. */
12603 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
12604 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
12606 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
12607 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
12609 /* Coprocessor numbers. */
12610 REGSET(p
, CP
), REGSET(P
, CP
),
12612 /* Coprocessor register numbers. The "cr" variants are for backward
12614 REGSET(c
, CN
), REGSET(C
, CN
),
12615 REGSET(cr
, CN
), REGSET(CR
, CN
),
12617 /* FPA registers. */
12618 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
12619 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
12621 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
12622 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
12624 /* VFP SP registers. */
12625 REGSET(s
,VFS
), REGSET(S
,VFS
),
12626 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
12628 /* VFP DP Registers. */
12629 REGSET(d
,VFD
), REGSET(D
,VFD
),
12630 /* Extra Neon DP registers. */
12631 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
12633 /* Neon QP registers. */
12634 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
12636 /* VFP control registers. */
12637 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
12638 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
12640 /* Maverick DSP coprocessor registers. */
12641 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
12642 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
12644 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
12645 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
12646 REGDEF(dspsc
,0,DSPSC
),
12648 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
12649 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
12650 REGDEF(DSPSC
,0,DSPSC
),
12652 /* iWMMXt data registers - p0, c0-15. */
12653 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
12655 /* iWMMXt control registers - p1, c0-3. */
12656 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
12657 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
12658 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
12659 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
12661 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12662 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
12663 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
12664 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
12665 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
12667 /* XScale accumulator registers. */
12668 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
12674 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12675 within psr_required_here. */
12676 static const struct asm_psr psrs
[] =
12678 /* Backward compatibility notation. Note that "all" is no longer
12679 truly all possible PSR bits. */
12680 {"all", PSR_c
| PSR_f
},
12684 /* Individual flags. */
12689 /* Combinations of flags. */
12690 {"fs", PSR_f
| PSR_s
},
12691 {"fx", PSR_f
| PSR_x
},
12692 {"fc", PSR_f
| PSR_c
},
12693 {"sf", PSR_s
| PSR_f
},
12694 {"sx", PSR_s
| PSR_x
},
12695 {"sc", PSR_s
| PSR_c
},
12696 {"xf", PSR_x
| PSR_f
},
12697 {"xs", PSR_x
| PSR_s
},
12698 {"xc", PSR_x
| PSR_c
},
12699 {"cf", PSR_c
| PSR_f
},
12700 {"cs", PSR_c
| PSR_s
},
12701 {"cx", PSR_c
| PSR_x
},
12702 {"fsx", PSR_f
| PSR_s
| PSR_x
},
12703 {"fsc", PSR_f
| PSR_s
| PSR_c
},
12704 {"fxs", PSR_f
| PSR_x
| PSR_s
},
12705 {"fxc", PSR_f
| PSR_x
| PSR_c
},
12706 {"fcs", PSR_f
| PSR_c
| PSR_s
},
12707 {"fcx", PSR_f
| PSR_c
| PSR_x
},
12708 {"sfx", PSR_s
| PSR_f
| PSR_x
},
12709 {"sfc", PSR_s
| PSR_f
| PSR_c
},
12710 {"sxf", PSR_s
| PSR_x
| PSR_f
},
12711 {"sxc", PSR_s
| PSR_x
| PSR_c
},
12712 {"scf", PSR_s
| PSR_c
| PSR_f
},
12713 {"scx", PSR_s
| PSR_c
| PSR_x
},
12714 {"xfs", PSR_x
| PSR_f
| PSR_s
},
12715 {"xfc", PSR_x
| PSR_f
| PSR_c
},
12716 {"xsf", PSR_x
| PSR_s
| PSR_f
},
12717 {"xsc", PSR_x
| PSR_s
| PSR_c
},
12718 {"xcf", PSR_x
| PSR_c
| PSR_f
},
12719 {"xcs", PSR_x
| PSR_c
| PSR_s
},
12720 {"cfs", PSR_c
| PSR_f
| PSR_s
},
12721 {"cfx", PSR_c
| PSR_f
| PSR_x
},
12722 {"csf", PSR_c
| PSR_s
| PSR_f
},
12723 {"csx", PSR_c
| PSR_s
| PSR_x
},
12724 {"cxf", PSR_c
| PSR_x
| PSR_f
},
12725 {"cxs", PSR_c
| PSR_x
| PSR_s
},
12726 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
12727 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
12728 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
12729 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
12730 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
12731 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
12732 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
12733 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
12734 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
12735 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
12736 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
12737 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
12738 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
12739 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
12740 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
12741 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
12742 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
12743 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
12744 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
12745 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
12746 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
12747 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
12748 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
12749 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
12752 /* Table of V7M psr names. */
/* NOTE(review): extraction artifact -- all entries of this table except
   "basepri_max" (apsr, iapsr, eapsr, xpsr, ipsr, epsr, iepsr, msp, psp,
   primask, basepri, faultmask, control in upstream sources -- TODO confirm
   exact names/values) were lost when this file was mangled.  Restore from
   a pristine copy before building.  */
12753 static const struct asm_psr v7m_psrs
[] =
12766 {"basepri_max", 18},
12771 /* Table of all shift-in-operand names. */
12772 static const struct asm_shift_name shift_names
[] =
12774 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
12775 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
12776 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
12777 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
12778 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
12779 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
/* Table of all explicit relocation names.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
12800 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12801 static const struct asm_cond conds
[] =
12805 {"cs", 0x2}, {"hs", 0x2},
12806 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* NOTE(review): extraction artifact -- the initializer entries of this
   table (sy/un/st and variants in upstream sources -- TODO confirm) were
   lost when this file was mangled.  Restore from a pristine copy before
   building.  */
12820 static struct asm_barrier_opt barrier_opt_names
[] =
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)	\
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te)	\
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.	Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.	*/
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT,	\
    THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13002 static const struct asm_opcode insns
[] =
13004 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13005 #define THUMB_VARIANT &arm_ext_v4t
13006 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13007 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13008 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13009 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13010 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13011 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13012 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13013 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13014 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13015 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13016 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13017 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13018 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13019 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13020 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13021 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13023 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13024 for setting PSR flag bits. They are obsolete in V6 and do not
13025 have Thumb equivalents. */
13026 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13027 tC3(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13028 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
13029 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
13030 tC3(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
13031 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
13032 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13033 tC3(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13034 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
13036 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
13037 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
13038 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
13039 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
13041 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13042 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13043 tCE(str
, 4000000, str
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13044 tC3(strb
, 4400000, strb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13046 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13047 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13048 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13049 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13050 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13051 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13053 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
13054 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
13055 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
13056 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
13059 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
13060 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
13061 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
13063 /* Thumb-compatibility pseudo ops. */
13064 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13065 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13066 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13067 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13068 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13069 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13070 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13071 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13072 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
13073 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
13074 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
13075 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
13077 #undef THUMB_VARIANT
13078 #define THUMB_VARIANT &arm_ext_v6
13079 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
13081 /* V1 instructions with no Thumb analogue prior to V6T2. */
13082 #undef THUMB_VARIANT
13083 #define THUMB_VARIANT &arm_ext_v6t2
13084 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
13085 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
13086 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13087 TC3(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13088 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
13090 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13091 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13092 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13093 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13095 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13096 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13098 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13099 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13101 /* V1 instructions with no Thumb analogue at all. */
13102 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
13103 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
13105 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
13106 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
13107 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
13108 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
13109 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
13110 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
13111 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
13112 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
13115 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13116 #undef THUMB_VARIANT
13117 #define THUMB_VARIANT &arm_ext_v4t
13118 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
13119 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
13121 #undef THUMB_VARIANT
13122 #define THUMB_VARIANT &arm_ext_v6t2
13123 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
13124 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
13126 /* Generic coprocessor instructions. */
13127 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
13128 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13129 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13130 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13131 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13132 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13133 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13136 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13137 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
13138 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
13141 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13142 TCE(mrs
, 10f0000
, f3ef8000
, 2, (RR
, PSR
), mrs
, t_mrs
),
13143 TCE(msr
, 120f000
, f3808000
, 2, (PSR
, RR_EXi
), msr
, t_msr
),
13146 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13147 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13148 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13149 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13150 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13151 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13152 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13153 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13154 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13157 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13158 #undef THUMB_VARIANT
13159 #define THUMB_VARIANT &arm_ext_v4t
13160 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13161 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13162 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13163 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13164 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13165 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13168 #define ARM_VARIANT &arm_ext_v4t_5
13169 /* ARM Architecture 4T. */
13170 /* Note: bx (and blx) are required on V5, even if the processor does
13171 not support Thumb. */
13172 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
13175 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13176 #undef THUMB_VARIANT
13177 #define THUMB_VARIANT &arm_ext_v5t
13178 /* Note: blx has 2 variants; the .value coded here is for
13179 BLX(2). Only this variant has conditional execution. */
13180 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
13181 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
13183 #undef THUMB_VARIANT
13184 #define THUMB_VARIANT &arm_ext_v6t2
13185 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
13186 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13187 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13188 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13189 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13190 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
13191 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13192 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13195 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13196 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13197 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13198 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13199 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13201 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13202 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13204 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13205 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13206 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13207 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13209 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13210 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13211 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13212 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13214 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13215 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13217 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13218 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13219 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13220 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13223 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13224 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
13225 TC3(ldrd
, 00000d0
, e9500000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
13226 TC3(strd
, 00000f0
, e9400000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
13228 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13229 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13232 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13233 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
13236 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13237 #undef THUMB_VARIANT
13238 #define THUMB_VARIANT &arm_ext_v6
13239 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
13240 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
13241 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13242 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13243 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13244 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13245 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13246 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13247 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13248 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
13250 #undef THUMB_VARIANT
13251 #define THUMB_VARIANT &arm_ext_v6t2
13252 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
13253 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13254 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13256 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
13257 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
13259 /* ARM V6 not included in V7M (eg. integer SIMD). */
13260 #undef THUMB_VARIANT
13261 #define THUMB_VARIANT &arm_ext_v6_notm
13262 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
13263 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
13264 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
13265 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13266 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13267 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13268 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13269 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13270 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13271 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13272 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13273 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13274 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13275 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13276 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13277 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13278 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13279 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13280 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13281 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13282 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13283 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13284 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13285 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13286 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13287 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13288 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13289 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13290 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13291 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13292 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13293 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13294 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13295 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13296 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13297 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13298 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13299 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13300 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13301 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
13302 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
13303 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
13304 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
13305 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
13306 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
13307 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
13308 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
13309 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13310 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13311 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13312 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13313 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13314 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13315 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13316 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13317 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13318 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13319 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13320 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13321 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13322 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13323 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13324 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13325 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13326 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13327 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13328 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13329 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13330 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13331 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13332 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13333 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13334 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13335 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13336 TUF(srsia
, 8cd0500
, e980c000
, 1, (I31w
), srs
, srs
),
13337 UF(srsib
, 9cd0500
, 1, (I31w
), srs
),
13338 UF(srsda
, 84d0500
, 1, (I31w
), srs
),
13339 TUF(srsdb
, 94d0500
, e800c000
, 1, (I31w
), srs
, srs
),
13340 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
13341 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
13342 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
13343 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13344 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13345 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
13348 #define ARM_VARIANT &arm_ext_v6k
13349 #undef THUMB_VARIANT
13350 #define THUMB_VARIANT &arm_ext_v6k
13351 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
13352 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
13353 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
13354 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
13356 #undef THUMB_VARIANT
13357 #define THUMB_VARIANT &arm_ext_v6_notm
13358 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
13359 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
13361 #undef THUMB_VARIANT
13362 #define THUMB_VARIANT &arm_ext_v6t2
13363 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
13364 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
13365 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
13366 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
13367 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
13370 #define ARM_VARIANT &arm_ext_v6z
13371 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
13374 #define ARM_VARIANT &arm_ext_v6t2
13375 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
13376 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
13377 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
13378 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
13380 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
13381 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, Iffff
), mov16
, t_mov16
),
13382 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, Iffff
), mov16
, t_mov16
),
13383 TCE(rbit
, 3ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
13385 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13386 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13387 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13388 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13390 UT(cbnz
, b900
, 2, (RR
, EXP
), t_czb
),
13391 UT(cbz
, b100
, 2, (RR
, EXP
), t_czb
),
13392 /* ARM does not really have an IT instruction. */
13393 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
13394 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
13395 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
13396 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
13397 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
13398 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
13399 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
13400 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
13401 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
13402 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
13403 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
13404 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
13405 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
13406 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
13407 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
13409 /* Thumb2 only instructions. */
13411 #define ARM_VARIANT NULL
13413 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
13414 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
13415 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
13416 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
13418 /* Thumb-2 hardware division instructions (R and M profiles only). */
13419 #undef THUMB_VARIANT
13420 #define THUMB_VARIANT &arm_ext_div
13421 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
13422 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
13424 /* ARM V7 instructions. */
13426 #define ARM_VARIANT &arm_ext_v7
13427 #undef THUMB_VARIANT
13428 #define THUMB_VARIANT &arm_ext_v7
13429 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
13430 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
13431 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
13432 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
13433 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
13436 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13437 cCE(wfs
, e200110
, 1, (RR
), rd
),
13438 cCE(rfs
, e300110
, 1, (RR
), rd
),
13439 cCE(wfc
, e400110
, 1, (RR
), rd
),
13440 cCE(rfc
, e500110
, 1, (RR
), rd
),
13442 cCL(ldfs
, c100100
, 2, (RF
, ADDR
), rd_cpaddr
),
13443 cCL(ldfd
, c108100
, 2, (RF
, ADDR
), rd_cpaddr
),
13444 cCL(ldfe
, c500100
, 2, (RF
, ADDR
), rd_cpaddr
),
13445 cCL(ldfp
, c508100
, 2, (RF
, ADDR
), rd_cpaddr
),
13447 cCL(stfs
, c000100
, 2, (RF
, ADDR
), rd_cpaddr
),
13448 cCL(stfd
, c008100
, 2, (RF
, ADDR
), rd_cpaddr
),
13449 cCL(stfe
, c400100
, 2, (RF
, ADDR
), rd_cpaddr
),
13450 cCL(stfp
, c408100
, 2, (RF
, ADDR
), rd_cpaddr
),
13452 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
13453 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
13454 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
13455 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
13456 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
13457 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
13458 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
13459 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
13460 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
13461 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
13462 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
13463 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
13465 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
13466 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
13467 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
13468 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
13469 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
13470 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
13471 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
13472 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
13473 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
13474 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
13475 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
13476 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
13478 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
13479 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
13480 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
13481 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
13482 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
13483 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
13484 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
13485 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
13486 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
13487 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
13488 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
13489 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
13491 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
13492 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
13493 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
13494 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
13495 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
13496 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
13497 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
13498 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
13499 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
13500 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
13501 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
13502 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
13504 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
13505 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
13506 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
13507 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
13508 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
13509 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
13510 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
13511 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
13512 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
13513 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
13514 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
13515 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
13517 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
13518 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
13519 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
13520 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
13521 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
13522 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
13523 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
13524 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
13525 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
13526 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
13527 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
13528 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
13530 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
13531 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
13532 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
13533 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
13534 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
13535 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
13536 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
13537 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
13538 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
13539 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
13540 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
13541 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
13543 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
13544 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
13545 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
13546 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
13547 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
13548 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
13549 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
13550 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
13551 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
13552 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
13553 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
13554 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
13556 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
13557 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
13558 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
13559 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
13560 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
13561 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
13562 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
13563 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
13564 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
13565 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
13566 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
13567 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
13569 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
13570 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
13571 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
13572 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
13573 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
13574 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
13575 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
13576 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
13577 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
13578 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
13579 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
13580 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
13582 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
13583 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
13584 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
13585 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
13586 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
13587 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
13588 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
13589 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
13590 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
13591 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
13592 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
13593 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
13595 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
13596 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
13597 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
13598 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
13599 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
13600 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
13601 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
13602 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
13603 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
13604 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
13605 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
13606 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
13608 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
13609 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
13610 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
13611 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
13612 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
13613 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
13614 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
13615 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
13616 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
13617 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
13618 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
13619 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
13621 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
13622 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
13623 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
13624 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
13625 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
13626 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
13627 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
13628 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
13629 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
13630 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
13631 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
13632 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
13634 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
13635 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
13636 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
13637 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
13638 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
13639 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
13640 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
13641 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
13642 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
13643 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
13644 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
13645 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
13647 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
13648 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
13649 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
13650 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
13651 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
13652 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
13653 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
13654 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
13655 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
13656 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
13657 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
13658 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
13660 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13661 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13662 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13663 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13664 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13665 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13666 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13667 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13668 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13669 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13670 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13671 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13673 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13674 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13675 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13676 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13677 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13678 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13679 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13680 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13681 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13682 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13683 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13684 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13686 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13687 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13688 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13689 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13690 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13691 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13692 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13693 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13694 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13695 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13696 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13697 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13699 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13700 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13701 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13702 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13703 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13704 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13705 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13706 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13707 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13708 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13709 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13710 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13712 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13713 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13714 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13715 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13716 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13717 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13718 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13719 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13720 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13721 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13722 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13723 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13725 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13726 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13727 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13728 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13729 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13730 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13731 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13732 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13733 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13734 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13735 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13736 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13738 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13739 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13740 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13741 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13742 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13743 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13744 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13745 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13746 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13747 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13748 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13749 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13751 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13752 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13753 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13754 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13755 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13756 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13757 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13758 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13759 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13760 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13761 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13762 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13764 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13765 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13766 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13767 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13768 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13769 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13770 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13771 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13772 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13773 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13774 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13775 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13777 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13778 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13779 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13780 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13781 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13782 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13783 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13784 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13785 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13786 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13787 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13788 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13790 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13791 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13792 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13793 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13794 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13795 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13796 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13797 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13798 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13799 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13800 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13801 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13803 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13804 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13805 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13806 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13807 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13808 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13809 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13810 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13811 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13812 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13813 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13814 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13816 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13817 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13818 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13819 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13820 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13821 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13822 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13823 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13824 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13825 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13826 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13827 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13829 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13830 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13831 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13832 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13834 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
13835 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
13836 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
13837 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
13838 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
13839 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
13840 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
13841 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
13842 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
13843 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
13844 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
13845 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
13847 /* The implementation of the FIX instruction is broken on some
13848 assemblers, in that it accepts a precision specifier as well as a
13849 rounding specifier, despite the fact that this is meaningless.
13850 To be more compatible, we accept it as well, though of course it
13851 does not set any bits. */
13852 cCE(fix
, e100110
, 2, (RR
, RF
), rd_rm
),
13853 cCL(fixp
, e100130
, 2, (RR
, RF
), rd_rm
),
13854 cCL(fixm
, e100150
, 2, (RR
, RF
), rd_rm
),
13855 cCL(fixz
, e100170
, 2, (RR
, RF
), rd_rm
),
13856 cCL(fixsp
, e100130
, 2, (RR
, RF
), rd_rm
),
13857 cCL(fixsm
, e100150
, 2, (RR
, RF
), rd_rm
),
13858 cCL(fixsz
, e100170
, 2, (RR
, RF
), rd_rm
),
13859 cCL(fixdp
, e100130
, 2, (RR
, RF
), rd_rm
),
13860 cCL(fixdm
, e100150
, 2, (RR
, RF
), rd_rm
),
13861 cCL(fixdz
, e100170
, 2, (RR
, RF
), rd_rm
),
13862 cCL(fixep
, e100130
, 2, (RR
, RF
), rd_rm
),
13863 cCL(fixem
, e100150
, 2, (RR
, RF
), rd_rm
),
13864 cCL(fixez
, e100170
, 2, (RR
, RF
), rd_rm
),
13866 /* Instructions that were new with the real FPA, call them V2. */
13868 #define ARM_VARIANT &fpu_fpa_ext_v2
13869 cCE(lfm
, c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13870 cCL(lfmfd
, c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13871 cCL(lfmea
, d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13872 cCE(sfm
, c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13873 cCL(sfmfd
, d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13874 cCL(sfmea
, c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13877 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13878 /* Moves and type conversions. */
13879 cCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13880 cCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
13881 cCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
13882 cCE(fmstat
, ef1fa10
, 0, (), noargs
),
13883 cCE(fsitos
, eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13884 cCE(fuitos
, eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13885 cCE(ftosis
, ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13886 cCE(ftosizs
, ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13887 cCE(ftouis
, ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13888 cCE(ftouizs
, ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13889 cCE(fmrx
, ef00a10
, 2, (RR
, RVC
), rd_rn
),
13890 cCE(fmxr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
13892 /* Memory operations. */
13893 cCE(flds
, d100a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
13894 cCE(fsts
, d000a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
13895 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13896 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13897 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13898 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13899 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13900 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13901 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13902 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13903 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13904 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13905 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13906 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13907 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13908 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13909 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13910 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13912 /* Monadic operations. */
13913 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13914 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13915 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13917 /* Dyadic operations. */
13918 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13919 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13920 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13921 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13922 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13923 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13924 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13925 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13926 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13929 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13930 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
13931 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13932 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
13935 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13936 /* Moves and type conversions. */
13937 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13938 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13939 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13940 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
13941 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
13942 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
13943 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
13944 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13945 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13946 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13947 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13948 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13949 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13951 /* Memory operations. */
13952 cCE(fldd
, d100b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
13953 cCE(fstd
, d000b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
13954 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13955 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13956 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13957 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13958 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13959 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13960 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13961 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13963 /* Monadic operations. */
13964 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13965 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13966 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13968 /* Dyadic operations. */
13969 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13970 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13971 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13972 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13973 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13974 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13975 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13976 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13977 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13980 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13981 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
13982 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13983 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
13986 #define ARM_VARIANT &fpu_vfp_ext_v2
13987 cCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
13988 cCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
13989 cCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
13990 cCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
13992 #undef THUMB_VARIANT
13993 #define THUMB_VARIANT &fpu_neon_ext_v1
13995 #define ARM_VARIANT &fpu_neon_ext_v1
13996 /* Data processing with three registers of the same length. */
13997 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
13998 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
13999 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
14000 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14001 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14002 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14003 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14004 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14005 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14006 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14007 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14008 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14009 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14010 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14011 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14012 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14013 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14014 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14015 /* If not immediate, fall back to neon_dyadic_i64_su.
14016 shl_imm should accept I8 I16 I32 I64,
14017 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14018 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
14019 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
14020 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
14021 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
14022 /* Logic ops, types optional & ignored. */
14023 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
14024 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
14025 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
14026 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
14027 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
14028 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
14029 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
14030 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
14031 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
14032 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
14033 /* Bitfield ops, untyped. */
14034 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14035 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14036 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14037 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14038 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14039 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14040 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14041 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14042 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14043 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14044 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14045 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14046 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14047 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14048 back to neon_dyadic_if_su. */
14049 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
14050 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
14051 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
14052 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
14053 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
14054 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
14055 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
14056 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
14057 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14058 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
14059 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
14060 /* As above, D registers only. */
14061 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
14062 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
14063 /* Int and float variants, signedness unimportant. */
14064 /* If not scalar, fall back to neon_dyadic_if_i. */
14065 nUF(vmla
, vmla
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14066 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14067 nUF(vmls
, vmls
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14068 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14069 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
14070 /* Add/sub take types I8 I16 I32 I64 F32. */
14071 nUF(vadd
, vadd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_addsub_if_i
),
14072 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
14073 nUF(vsub
, vsub
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_addsub_if_i
),
14074 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
14075 /* vtst takes sizes 8, 16, 32. */
14076 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
14077 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
14078 /* VMUL takes I8 I16 I32 F32 P8. */
14079 nUF(vmul
, vmul
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mul
),
14080 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
14081 /* VQD{R}MULH takes S16 S32. */
14082 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
14083 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
14084 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
14085 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
14086 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
14087 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
14088 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
14089 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
14090 NUF(vaclt
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
14091 NUF(vacltq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
14092 NUF(vacle
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
14093 NUF(vacleq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
14094 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
14095 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
14096 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
14097 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
14099 /* Two address, int/float. Types S8 S16 S32 F32. */
14100 NUF(vabs
, 1b10300
, 2, (RNDQ
, RNDQ
), neon_abs_neg
),
14101 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
14102 NUF(vneg
, 1b10380
, 2, (RNDQ
, RNDQ
), neon_abs_neg
),
14103 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
14105 /* Data processing with two registers and a shift amount. */
14106 /* Right shifts, and variants with rounding.
14107 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14108 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
14109 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
14110 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
14111 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
14112 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
14113 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
14114 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
14115 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
14116 /* Shift and insert. Sizes accepted 8 16 32 64. */
14117 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
14118 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
14119 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
14120 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
14121 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14122 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
14123 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
14124 /* Right shift immediate, saturating & narrowing, with rounding variants.
14125 Types accepted S16 S32 S64 U16 U32 U64. */
14126 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
14127 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
14128 /* As above, unsigned. Types accepted S16 S32 S64. */
14129 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
14130 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
14131 /* Right shift narrowing. Types accepted I16 I32 I64. */
14132 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
14133 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
14134 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14135 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
14136 /* CVT with optional immediate for fixed-point variant. */
14137 nUF(vcvt
, vcvt
, 3, (RNDQ
, RNDQ
, oI32b
), neon_cvt
),
14138 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
14140 /* One register and an immediate value. All encoding special-cased! */
14141 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
14142 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
14143 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
14144 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
14146 /* Data processing, three registers of different lengths. */
14147 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14148 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
14149 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14150 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14151 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14152 /* If not scalar, fall back to neon_dyadic_long.
14153 Vector types as above, scalar types S16 S32 U16 U32. */
14154 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
14155 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
14156 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14157 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
14158 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
14159 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14160 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14161 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14162 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14163 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14164 /* Saturating doubling multiplies. Types S16 S32. */
14165 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14166 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14167 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14168 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14169 S16 S32 U16 U32. */
14170 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
14172 /* Extract. Size 8. */
14173 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I7
), neon_ext
),
14174 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I7
), neon_ext
),
14176 /* Two registers, miscellaneous. */
14177 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14178 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
14179 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
14180 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
14181 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
14182 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
14183 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
14184 /* Vector replicate. Sizes 8 16 32. */
14185 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
14186 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
14187 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14188 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
14189 /* VMOVN. Types I16 I32 I64. */
14190 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
14191 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14192 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
14193 /* VQMOVUN. Types S16 S32 S64. */
14194 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
14195 /* VZIP / VUZP. Sizes 8 16 32. */
14196 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
14197 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
14198 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
14199 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
14200 /* VQABS / VQNEG. Types S8 S16 S32. */
14201 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
14202 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
14203 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
14204 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
14205 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14206 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
14207 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
14208 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
14209 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
14210 /* Reciprocal estimates. Types U32 F32. */
14211 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
14212 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
14213 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
14214 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
14215 /* VCLS. Types S8 S16 S32. */
14216 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
14217 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
14218 /* VCLZ. Types I8 I16 I32. */
14219 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
14220 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
14221 /* VCNT. Size 8. */
14222 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
14223 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
14224 /* Two address, untyped. */
14225 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
14226 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
14227 /* VTRN. Sizes 8 16 32. */
14228 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
14229 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
14231 /* Table lookup. Size 8. */
14232 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
14233 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
14235 #undef THUMB_VARIANT
14236 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14238 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14240 /* Load/store instructions. Available in Neon or VFPv3. */
14241 NCE(vldm
, c900b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14242 NCE(vldmia
, c900b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14243 NCE(vldmdb
, d100b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14244 NCE(vstm
, c800b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14245 NCE(vstmia
, c800b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14246 NCE(vstmdb
, d000b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14247 NCE(vldr
, d100b00
, 2, (RND
, ADDR
), neon_ldr_str
),
14248 NCE(vstr
, d000b00
, 2, (RND
, ADDR
), neon_ldr_str
),
14250 /* Neon element/structure load/store. */
14251 nUF(vld1
, vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14252 nUF(vst1
, vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14253 nUF(vld2
, vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14254 nUF(vst2
, vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14255 nUF(vld3
, vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14256 nUF(vst3
, vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14257 nUF(vld4
, vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14258 nUF(vst4
, vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14260 #undef THUMB_VARIANT
14261 #define THUMB_VARIANT &fpu_vfp_ext_v3
14263 #define ARM_VARIANT &fpu_vfp_ext_v3
14265 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
14266 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
14267 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14268 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14269 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14270 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14271 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14272 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14273 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14274 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14275 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14276 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14277 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14278 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14279 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14280 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14281 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14282 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14284 #undef THUMB_VARIANT
14286 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14287 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14288 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14289 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14290 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14291 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14292 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14293 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
14294 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
14297 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14298 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
14299 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
14300 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
14301 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
14302 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
14303 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
14304 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
14305 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
14306 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
14307 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14308 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14309 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14310 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14311 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14312 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14313 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14314 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14315 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14316 cCE(tmcr
, e000110
, 2, (RIWC
, RR
), rn_rd
),
14317 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
14318 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14319 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14320 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14321 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14322 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14323 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14324 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
14325 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
14326 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
14327 cCE(tmrc
, e100110
, 2, (RR
, RIWC
), rd_rn
),
14328 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
14329 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
14330 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
14331 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
14332 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14333 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14334 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14335 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14336 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14337 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14338 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14339 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14340 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14341 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14342 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14343 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14344 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
14345 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14346 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14347 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14348 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14349 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14350 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14351 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14352 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14353 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14354 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14355 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14356 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14357 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14358 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14359 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14360 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14361 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14362 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14363 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14364 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14365 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14366 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
14367 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
14368 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14369 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14370 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14371 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14372 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14373 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14374 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14375 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14376 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14377 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14378 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14379 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14380 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14381 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14382 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14383 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14384 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14385 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14386 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
14387 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14388 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14389 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14390 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14391 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14392 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14393 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14394 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14395 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14396 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14397 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14398 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14399 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14400 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14401 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14402 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14403 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14404 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14405 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14406 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14407 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14408 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
14409 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14410 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14411 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14412 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14413 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14414 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14415 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14416 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14417 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14418 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14419 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14420 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14421 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14422 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14423 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14424 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14425 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14426 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14427 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14428 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14429 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
14430 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
14431 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14432 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14433 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14434 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14435 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14436 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14437 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14438 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14439 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14440 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14441 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14442 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14443 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14444 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14445 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14446 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14447 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14448 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14449 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14450 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14451 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14452 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14453 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14454 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14455 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14456 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14457 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14458 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14459 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
14462 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14463 cCE(cfldrs
, c100400
, 2, (RMF
, ADDR
), rd_cpaddr
),
14464 cCE(cfldrd
, c500400
, 2, (RMD
, ADDR
), rd_cpaddr
),
14465 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
14466 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
14467 cCE(cfstrs
, c000400
, 2, (RMF
, ADDR
), rd_cpaddr
),
14468 cCE(cfstrd
, c400400
, 2, (RMD
, ADDR
), rd_cpaddr
),
14469 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
14470 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
14471 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
14472 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
14473 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
14474 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
14475 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
14476 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
14477 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
14478 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
14479 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
14480 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
14481 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
14482 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
14483 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
14484 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
14485 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
14486 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
14487 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
14488 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
14489 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
14490 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
14491 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
14492 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
14493 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
14494 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
14495 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
14496 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
14497 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
14498 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
14499 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
14500 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
14501 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
14502 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
14503 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
14504 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
14505 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
14506 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
14507 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
14508 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
14509 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
14510 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
14511 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
14512 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
14513 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
14514 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
14515 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
14516 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
14517 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14518 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14519 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14520 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14521 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14522 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14523 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
14524 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
14525 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
14526 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
14527 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14528 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14529 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14530 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14531 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14532 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14533 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14534 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14535 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
14536 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
14537 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
14538 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
14541 #undef THUMB_VARIANT
14568 /* MD interface: bits in the object file. */
14570 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14571 for use in the a.out file, and stores them in the array pointed to by buf.
14572 This knows about the endian-ness of the target machine and does
14573 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
14574 2 (short) and 4 (long) Floating numbers are put out as a series of
14575 LITTLENUMS (shorts, here at least). */
14578 md_number_to_chars (char * buf
, valueT val
, int n
)
14580 if (target_big_endian
)
14581 number_to_chars_bigendian (buf
, val
, n
);
14583 number_to_chars_littleendian (buf
, val
, n
);
14587 md_chars_to_number (char * buf
, int n
)
14590 unsigned char * where
= (unsigned char *) buf
;
14592 if (target_big_endian
)
14597 result
|= (*where
++ & 255);
14605 result
|= (where
[n
] & 255);
14612 /* MD interface: Sections. */
14614 /* Estimate the size of a frag before relaxing. Assume everything fits in
14618 md_estimate_size_before_relax (fragS
* fragp
,
14619 segT segtype ATTRIBUTE_UNUSED
)
14625 /* Convert a machine dependent frag. */
14628 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
14630 unsigned long insn
;
14631 unsigned long old_op
;
14639 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
14641 old_op
= bfd_get_16(abfd
, buf
);
14642 if (fragp
->fr_symbol
) {
14643 exp
.X_op
= O_symbol
;
14644 exp
.X_add_symbol
= fragp
->fr_symbol
;
14646 exp
.X_op
= O_constant
;
14648 exp
.X_add_number
= fragp
->fr_offset
;
14649 opcode
= fragp
->fr_subtype
;
14652 case T_MNEM_ldr_pc
:
14653 case T_MNEM_ldr_pc2
:
14654 case T_MNEM_ldr_sp
:
14655 case T_MNEM_str_sp
:
14662 if (fragp
->fr_var
== 4)
14664 insn
= THUMB_OP32(opcode
);
14665 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
14667 insn
|= (old_op
& 0x700) << 4;
14671 insn
|= (old_op
& 7) << 12;
14672 insn
|= (old_op
& 0x38) << 13;
14674 insn
|= 0x00000c00;
14675 put_thumb32_insn (buf
, insn
);
14676 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
14680 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
14682 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
14685 if (fragp
->fr_var
== 4)
14687 insn
= THUMB_OP32 (opcode
);
14688 insn
|= (old_op
& 0xf0) << 4;
14689 put_thumb32_insn (buf
, insn
);
14690 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
14694 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14695 exp
.X_add_number
-= 4;
14703 if (fragp
->fr_var
== 4)
14705 int r0off
= (opcode
== T_MNEM_mov
14706 || opcode
== T_MNEM_movs
) ? 0 : 8;
14707 insn
= THUMB_OP32 (opcode
);
14708 insn
= (insn
& 0xe1ffffff) | 0x10000000;
14709 insn
|= (old_op
& 0x700) << r0off
;
14710 put_thumb32_insn (buf
, insn
);
14711 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14715 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
14720 if (fragp
->fr_var
== 4)
14722 insn
= THUMB_OP32(opcode
);
14723 put_thumb32_insn (buf
, insn
);
14724 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
14727 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
14731 if (fragp
->fr_var
== 4)
14733 insn
= THUMB_OP32(opcode
);
14734 insn
|= (old_op
& 0xf00) << 14;
14735 put_thumb32_insn (buf
, insn
);
14736 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
14739 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
14742 case T_MNEM_add_sp
:
14743 case T_MNEM_add_pc
:
14744 case T_MNEM_inc_sp
:
14745 case T_MNEM_dec_sp
:
14746 if (fragp
->fr_var
== 4)
14748 /* ??? Choose between add and addw. */
14749 insn
= THUMB_OP32 (opcode
);
14750 insn
|= (old_op
& 0xf0) << 4;
14751 put_thumb32_insn (buf
, insn
);
14752 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14755 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14763 if (fragp
->fr_var
== 4)
14765 insn
= THUMB_OP32 (opcode
);
14766 insn
|= (old_op
& 0xf0) << 4;
14767 insn
|= (old_op
& 0xf) << 16;
14768 put_thumb32_insn (buf
, insn
);
14769 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14772 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14778 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
14780 fixp
->fx_file
= fragp
->fr_file
;
14781 fixp
->fx_line
= fragp
->fr_line
;
14782 fragp
->fr_fix
+= fragp
->fr_var
;
14785 /* Return the size of a relaxable immediate operand instruction.
14786 SHIFT and SIZE specify the form of the allowable immediate. */
14788 relax_immediate (fragS
*fragp
, int size
, int shift
)
14794 /* ??? Should be able to do better than this. */
14795 if (fragp
->fr_symbol
)
14798 low
= (1 << shift
) - 1;
14799 mask
= (1 << (shift
+ size
)) - (1 << shift
);
14800 offset
= fragp
->fr_offset
;
14801 /* Force misaligned offsets to 32-bit variant. */
14804 if (offset
& ~mask
)
14809 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14812 relax_adr (fragS
*fragp
, asection
*sec
)
14817 /* Assume worst case for symbols not known to be in the same section. */
14818 if (!S_IS_DEFINED(fragp
->fr_symbol
)
14819 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
14822 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
14823 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
14824 addr
= (addr
+ 4) & ~3;
14825 /* Fix the insn as the 4-byte version if the target address is not
14826 sufficiently aligned. This is prevents an infinite loop when two
14827 instructions have contradictory range/alignment requirements. */
14831 if (val
< 0 || val
> 1020)
14836 /* Return the size of a relaxable add/sub immediate instruction. */
14838 relax_addsub (fragS
*fragp
, asection
*sec
)
14843 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
14844 op
= bfd_get_16(sec
->owner
, buf
);
14845 if ((op
& 0xf) == ((op
>> 4) & 0xf))
14846 return relax_immediate (fragp
, 8, 0);
14848 return relax_immediate (fragp
, 3, 0);
14852 /* Return the size of a relaxable branch instruction. BITS is the
14853 size of the offset field in the narrow instruction. */
14856 relax_branch (fragS
*fragp
, asection
*sec
, int bits
)
14862 /* Assume worst case for symbols not known to be in the same section. */
14863 if (!S_IS_DEFINED(fragp
->fr_symbol
)
14864 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
14867 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
14868 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
14871 /* Offset is a signed value *2 */
14873 if (val
>= limit
|| val
< -limit
)
14879 /* Relax a machine dependent frag. This returns the amount by which
14880 the current size of the frag should change. */
14883 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch ATTRIBUTE_UNUSED
)
14888 oldsize
= fragp
->fr_var
;
14889 switch (fragp
->fr_subtype
)
14891 case T_MNEM_ldr_pc2
:
14892 newsize
= relax_adr(fragp
, sec
);
14894 case T_MNEM_ldr_pc
:
14895 case T_MNEM_ldr_sp
:
14896 case T_MNEM_str_sp
:
14897 newsize
= relax_immediate(fragp
, 8, 2);
14901 newsize
= relax_immediate(fragp
, 5, 2);
14905 newsize
= relax_immediate(fragp
, 5, 1);
14909 newsize
= relax_immediate(fragp
, 5, 0);
14912 newsize
= relax_adr(fragp
, sec
);
14918 newsize
= relax_immediate(fragp
, 8, 0);
14921 newsize
= relax_branch(fragp
, sec
, 11);
14924 newsize
= relax_branch(fragp
, sec
, 8);
14926 case T_MNEM_add_sp
:
14927 case T_MNEM_add_pc
:
14928 newsize
= relax_immediate (fragp
, 8, 2);
14930 case T_MNEM_inc_sp
:
14931 case T_MNEM_dec_sp
:
14932 newsize
= relax_immediate (fragp
, 7, 2);
14938 newsize
= relax_addsub (fragp
, sec
);
14945 fragp
->fr_var
= -newsize
;
14946 md_convert_frag (sec
->owner
, sec
, fragp
);
14948 return -(newsize
+ oldsize
);
14950 fragp
->fr_var
= newsize
;
14951 return newsize
- oldsize
;
14954 /* Round up a section size to the appropriate boundary. */
14957 md_section_align (segT segment ATTRIBUTE_UNUSED
,
14963 /* Round all sects to multiple of 4. */
14964 return (size
+ 3) & ~3;
14968 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
14969 of an rs_align_code fragment. */
14972 arm_handle_align (fragS
* fragP
)
14974 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
14975 static char const thumb_noop
[2] = { 0xc0, 0x46 };
14976 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
14977 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
14979 int bytes
, fix
, noop_size
;
14983 if (fragP
->fr_type
!= rs_align_code
)
14986 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
14987 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
14990 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
14991 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
14993 if (fragP
->tc_frag_data
)
14995 if (target_big_endian
)
14996 noop
= thumb_bigend_noop
;
14999 noop_size
= sizeof (thumb_noop
);
15003 if (target_big_endian
)
15004 noop
= arm_bigend_noop
;
15007 noop_size
= sizeof (arm_noop
);
15010 if (bytes
& (noop_size
- 1))
15012 fix
= bytes
& (noop_size
- 1);
15013 memset (p
, 0, fix
);
15018 while (bytes
>= noop_size
)
15020 memcpy (p
, noop
, noop_size
);
15022 bytes
-= noop_size
;
15026 fragP
->fr_fix
+= fix
;
15027 fragP
->fr_var
= noop_size
;
15030 /* Called from md_do_align. Used to create an alignment
15031 frag in a code section. */
15034 arm_frag_align_code (int n
, int max
)
15038 /* We assume that there will never be a requirement
15039 to support alignments greater than 32 bytes. */
15040 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
15041 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15043 p
= frag_var (rs_align_code
,
15044 MAX_MEM_FOR_RS_ALIGN_CODE
,
15046 (relax_substateT
) max
,
15053 /* Perform target specific initialisation of a frag. */
15056 arm_init_frag (fragS
* fragP
)
15058 /* Record whether this frag is in an ARM or a THUMB area. */
15059 fragP
->tc_frag_data
= thumb_mode
;
15063 /* When we change sections we need to issue a new mapping symbol. */
15066 arm_elf_change_section (void)
15069 segment_info_type
*seginfo
;
15071 /* Link an unlinked unwind index table section to the .text section. */
15072 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
15073 && elf_linked_to_section (now_seg
) == NULL
)
15074 elf_linked_to_section (now_seg
) = text_section
;
15076 if (!SEG_NORMAL (now_seg
))
15079 flags
= bfd_get_section_flags (stdoutput
, now_seg
);
15081 /* We can ignore sections that only contain debug info. */
15082 if ((flags
& SEC_ALLOC
) == 0)
15085 seginfo
= seg_info (now_seg
);
15086 mapstate
= seginfo
->tc_segment_info_data
.mapstate
;
15087 marked_pr_dependency
= seginfo
->tc_segment_info_data
.marked_pr_dependency
;
15091 arm_elf_section_type (const char * str
, size_t len
)
15093 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
15094 return SHT_ARM_EXIDX
;
15099 /* Code to deal with unwinding tables. */
15101 static void add_unwind_adjustsp (offsetT
);
15103 /* Generate any deferred unwind frame offset. */
15106 flush_pending_unwind (void)
15110 offset
= unwind
.pending_offset
;
15111 unwind
.pending_offset
= 0;
15113 add_unwind_adjustsp (offset
);
15116 /* Add an opcode to this list for this function. Two-byte opcodes should
15117 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15121 add_unwind_opcode (valueT op
, int length
)
15123 /* Add any deferred stack adjustment. */
15124 if (unwind
.pending_offset
)
15125 flush_pending_unwind ();
15127 unwind
.sp_restored
= 0;
15129 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
15131 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
15132 if (unwind
.opcodes
)
15133 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
15134 unwind
.opcode_alloc
);
15136 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
15141 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
15143 unwind
.opcode_count
++;
15147 /* Add unwind opcodes to adjust the stack pointer. */
15150 add_unwind_adjustsp (offsetT offset
)
15154 if (offset
> 0x200)
15156 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15161 /* Long form: 0xb2, uleb128. */
15162 /* This might not fit in a word so add the individual bytes,
15163 remembering the list is built in reverse order. */
15164 o
= (valueT
) ((offset
- 0x204) >> 2);
15166 add_unwind_opcode (0, 1);
15168 /* Calculate the uleb128 encoding of the offset. */
15172 bytes
[n
] = o
& 0x7f;
15178 /* Add the insn. */
15180 add_unwind_opcode (bytes
[n
- 1], 1);
15181 add_unwind_opcode (0xb2, 1);
15183 else if (offset
> 0x100)
15185 /* Two short opcodes. */
15186 add_unwind_opcode (0x3f, 1);
15187 op
= (offset
- 0x104) >> 2;
15188 add_unwind_opcode (op
, 1);
15190 else if (offset
> 0)
15192 /* Short opcode. */
15193 op
= (offset
- 4) >> 2;
15194 add_unwind_opcode (op
, 1);
15196 else if (offset
< 0)
15199 while (offset
> 0x100)
15201 add_unwind_opcode (0x7f, 1);
15204 op
= ((offset
- 4) >> 2) | 0x40;
15205 add_unwind_opcode (op
, 1);
15209 /* Finish the list of unwind opcodes for this function. */
15211 finish_unwind_opcodes (void)
15215 if (unwind
.fp_used
)
15217 /* Adjust sp as necessary. */
15218 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
15219 flush_pending_unwind ();
15221 /* After restoring sp from the frame pointer. */
15222 op
= 0x90 | unwind
.fp_reg
;
15223 add_unwind_opcode (op
, 1);
15226 flush_pending_unwind ();
15230 /* Start an exception table entry. If idx is nonzero this is an index table
15234 start_unwind_section (const segT text_seg
, int idx
)
15236 const char * text_name
;
15237 const char * prefix
;
15238 const char * prefix_once
;
15239 const char * group_name
;
15243 size_t sec_name_len
;
15250 prefix
= ELF_STRING_ARM_unwind
;
15251 prefix_once
= ELF_STRING_ARM_unwind_once
;
15252 type
= SHT_ARM_EXIDX
;
15256 prefix
= ELF_STRING_ARM_unwind_info
;
15257 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
15258 type
= SHT_PROGBITS
;
15261 text_name
= segment_name (text_seg
);
15262 if (streq (text_name
, ".text"))
15265 if (strncmp (text_name
, ".gnu.linkonce.t.",
15266 strlen (".gnu.linkonce.t.")) == 0)
15268 prefix
= prefix_once
;
15269 text_name
+= strlen (".gnu.linkonce.t.");
15272 prefix_len
= strlen (prefix
);
15273 text_len
= strlen (text_name
);
15274 sec_name_len
= prefix_len
+ text_len
;
15275 sec_name
= xmalloc (sec_name_len
+ 1);
15276 memcpy (sec_name
, prefix
, prefix_len
);
15277 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
15278 sec_name
[prefix_len
+ text_len
] = '\0';
15284 /* Handle COMDAT group. */
15285 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
15287 group_name
= elf_group_name (text_seg
);
15288 if (group_name
== NULL
)
15290 as_bad ("Group section `%s' has no group signature",
15291 segment_name (text_seg
));
15292 ignore_rest_of_line ();
15295 flags
|= SHF_GROUP
;
15299 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
15301 /* Set the setion link for index tables. */
15303 elf_linked_to_section (now_seg
) = text_seg
;
15307 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15308 personality routine data. Returns zero, or the index table value for
15309 and inline entry. */
15312 create_unwind_entry (int have_data
)
15317 /* The current word of data. */
15319 /* The number of bytes left in this word. */
15322 finish_unwind_opcodes ();
15324 /* Remember the current text section. */
15325 unwind
.saved_seg
= now_seg
;
15326 unwind
.saved_subseg
= now_subseg
;
15328 start_unwind_section (now_seg
, 0);
15330 if (unwind
.personality_routine
== NULL
)
15332 if (unwind
.personality_index
== -2)
15335 as_bad (_("handerdata in cantunwind frame"));
15336 return 1; /* EXIDX_CANTUNWIND. */
15339 /* Use a default personality routine if none is specified. */
15340 if (unwind
.personality_index
== -1)
15342 if (unwind
.opcode_count
> 3)
15343 unwind
.personality_index
= 1;
15345 unwind
.personality_index
= 0;
15348 /* Space for the personality routine entry. */
15349 if (unwind
.personality_index
== 0)
15351 if (unwind
.opcode_count
> 3)
15352 as_bad (_("too many unwind opcodes for personality routine 0"));
15356 /* All the data is inline in the index table. */
15359 while (unwind
.opcode_count
> 0)
15361 unwind
.opcode_count
--;
15362 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
15366 /* Pad with "finish" opcodes. */
15368 data
= (data
<< 8) | 0xb0;
15375 /* We get two opcodes "free" in the first word. */
15376 size
= unwind
.opcode_count
- 2;
15379 /* An extra byte is required for the opcode count. */
15380 size
= unwind
.opcode_count
+ 1;
15382 size
= (size
+ 3) >> 2;
15384 as_bad (_("too many unwind opcodes"));
15386 frag_align (2, 0, 0);
15387 record_alignment (now_seg
, 2);
15388 unwind
.table_entry
= expr_build_dot ();
15390 /* Allocate the table entry. */
15391 ptr
= frag_more ((size
<< 2) + 4);
15392 where
= frag_now_fix () - ((size
<< 2) + 4);
15394 switch (unwind
.personality_index
)
15397 /* ??? Should this be a PLT generating relocation? */
15398 /* Custom personality routine. */
15399 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
15400 BFD_RELOC_ARM_PREL31
);
15405 /* Set the first byte to the number of additional words. */
15410 /* ABI defined personality routines. */
15412 /* Three opcodes bytes are packed into the first word. */
15419 /* The size and first two opcode bytes go in the first word. */
15420 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
15425 /* Should never happen. */
15429 /* Pack the opcodes into words (MSB first), reversing the list at the same
15431 while (unwind
.opcode_count
> 0)
15435 md_number_to_chars (ptr
, data
, 4);
15440 unwind
.opcode_count
--;
15442 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
15445 /* Finish off the last word. */
15448 /* Pad with "finish" opcodes. */
15450 data
= (data
<< 8) | 0xb0;
15452 md_number_to_chars (ptr
, data
, 4);
15457 /* Add an empty descriptor if there is no user-specified data. */
15458 ptr
= frag_more (4);
15459 md_number_to_chars (ptr
, 0, 4);
15465 /* Convert REGNAME to a DWARF-2 register number. */
15468 tc_arm_regname_to_dw2regnum (const char *regname
)
15470 int reg
= arm_reg_parse ((char **) ®name
, REG_TYPE_RN
);
15478 /* Initialize the DWARF-2 unwind information for this procedure. */
15481 tc_arm_frame_initial_instructions (void)
15483 cfi_add_CFA_def_cfa (REG_SP
, 0);
15485 #endif /* OBJ_ELF */
15488 /* MD interface: Symbol and relocation handling. */
15490 /* Return the address within the segment that a PC-relative fixup is
15491 relative to. For ARM, PC-relative fixups applied to instructions
15492 are generally relative to the location of the fixup plus 8 bytes.
15493 Thumb branches are offset by 4, and Thumb loads relative to PC
15494 require special handling. */
15497 md_pcrel_from_section (fixS
* fixP
, segT seg
)
15499 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
15501 /* If this is pc-relative and we are going to emit a relocation
15502 then we just want to put out any pipeline compensation that the linker
15503 will need. Otherwise we want to use the calculated base. */
15505 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
15506 || arm_force_relocation (fixP
)))
15509 switch (fixP
->fx_r_type
)
15511 /* PC relative addressing on the Thumb is slightly odd as the
15512 bottom two bits of the PC are forced to zero for the
15513 calculation. This happens *after* application of the
15514 pipeline offset. However, Thumb adrl already adjusts for
15515 this, so we need not do it again. */
15516 case BFD_RELOC_ARM_THUMB_ADD
:
15519 case BFD_RELOC_ARM_THUMB_OFFSET
:
15520 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
15521 case BFD_RELOC_ARM_T32_ADD_PC12
:
15522 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
15523 return (base
+ 4) & ~3;
15525 /* Thumb branches are simply offset by +4. */
15526 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
15527 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
15528 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
15529 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
15530 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
15531 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
15532 case BFD_RELOC_THUMB_PCREL_BLX
:
15535 /* ARM mode branches are offset by +8. However, the Windows CE
15536 loader expects the relocation not to take this into account. */
15537 case BFD_RELOC_ARM_PCREL_BRANCH
:
15538 case BFD_RELOC_ARM_PCREL_CALL
:
15539 case BFD_RELOC_ARM_PCREL_JUMP
:
15540 case BFD_RELOC_ARM_PCREL_BLX
:
15541 case BFD_RELOC_ARM_PLT32
:
15548 /* ARM mode loads relative to PC are also offset by +8. Unlike
15549 branches, the Windows CE loader *does* expect the relocation
15550 to take this into account. */
15551 case BFD_RELOC_ARM_OFFSET_IMM
:
15552 case BFD_RELOC_ARM_OFFSET_IMM8
:
15553 case BFD_RELOC_ARM_HWLITERAL
:
15554 case BFD_RELOC_ARM_LITERAL
:
15555 case BFD_RELOC_ARM_CP_OFF_IMM
:
15559 /* Other PC-relative relocations are un-offset. */
15565 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15566 Otherwise we have no need to default values of symbols. */
15569 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
15572 if (name
[0] == '_' && name
[1] == 'G'
15573 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
15577 if (symbol_find (name
))
15578 as_bad ("GOT already in the symbol table");
15580 GOT_symbol
= symbol_new (name
, undefined_section
,
15581 (valueT
) 0, & zero_address_frag
);
15591 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15592 computed as two separate immediate values, added together. We
15593 already know that this value cannot be computed by just one ARM
15596 static unsigned int
15597 validate_immediate_twopart (unsigned int val
,
15598 unsigned int * highpart
)
15603 for (i
= 0; i
< 32; i
+= 2)
15604 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
15610 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
15612 else if (a
& 0xff0000)
15614 if (a
& 0xff000000)
15616 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
15620 assert (a
& 0xff000000);
15621 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
15624 return (a
& 0xff) | (i
<< 7);
15631 validate_offset_imm (unsigned int val
, int hwse
)
15633 if ((hwse
&& val
> 255) || val
> 4095)
15638 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15639 negative immediate constant by altering the instruction. A bit of
15644 by inverting the second operand, and
15647 by negating the second operand. */
15650 negate_data_op (unsigned long * instruction
,
15651 unsigned long value
)
15654 unsigned long negated
, inverted
;
15656 negated
= encode_arm_immediate (-value
);
15657 inverted
= encode_arm_immediate (~value
);
15659 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
15662 /* First negates. */
15663 case OPCODE_SUB
: /* ADD <-> SUB */
15664 new_inst
= OPCODE_ADD
;
15669 new_inst
= OPCODE_SUB
;
15673 case OPCODE_CMP
: /* CMP <-> CMN */
15674 new_inst
= OPCODE_CMN
;
15679 new_inst
= OPCODE_CMP
;
15683 /* Now Inverted ops. */
15684 case OPCODE_MOV
: /* MOV <-> MVN */
15685 new_inst
= OPCODE_MVN
;
15690 new_inst
= OPCODE_MOV
;
15694 case OPCODE_AND
: /* AND <-> BIC */
15695 new_inst
= OPCODE_BIC
;
15700 new_inst
= OPCODE_AND
;
15704 case OPCODE_ADC
: /* ADC <-> SBC */
15705 new_inst
= OPCODE_SBC
;
15710 new_inst
= OPCODE_ADC
;
15714 /* We cannot do anything. */
15719 if (value
== (unsigned) FAIL
)
15722 *instruction
&= OPCODE_MASK
;
15723 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
15727 /* Like negate_data_op, but for Thumb-2. */
15729 static unsigned int
15730 thumb32_negate_data_op (offsetT
*instruction
, offsetT value
)
15734 offsetT negated
, inverted
;
15736 negated
= encode_thumb32_immediate (-value
);
15737 inverted
= encode_thumb32_immediate (~value
);
15739 rd
= (*instruction
>> 8) & 0xf;
15740 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
15743 /* ADD <-> SUB. Includes CMP <-> CMN. */
15744 case T2_OPCODE_SUB
:
15745 new_inst
= T2_OPCODE_ADD
;
15749 case T2_OPCODE_ADD
:
15750 new_inst
= T2_OPCODE_SUB
;
15754 /* ORR <-> ORN. Includes MOV <-> MVN. */
15755 case T2_OPCODE_ORR
:
15756 new_inst
= T2_OPCODE_ORN
;
15760 case T2_OPCODE_ORN
:
15761 new_inst
= T2_OPCODE_ORR
;
15765 /* AND <-> BIC. TST has no inverted equivalent. */
15766 case T2_OPCODE_AND
:
15767 new_inst
= T2_OPCODE_BIC
;
15774 case T2_OPCODE_BIC
:
15775 new_inst
= T2_OPCODE_AND
;
15780 case T2_OPCODE_ADC
:
15781 new_inst
= T2_OPCODE_SBC
;
15785 case T2_OPCODE_SBC
:
15786 new_inst
= T2_OPCODE_ADC
;
15790 /* We cannot do anything. */
15798 *instruction
&= T2_OPCODE_MASK
;
15799 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
15803 /* Read a 32-bit thumb instruction from buf. */
15804 static unsigned long
15805 get_thumb32_insn (char * buf
)
15807 unsigned long insn
;
15808 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
15809 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
15815 md_apply_fix (fixS
* fixP
,
15819 offsetT value
= * valP
;
15821 unsigned int newimm
;
15822 unsigned long temp
;
15824 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
15826 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
15828 /* Note whether this will delete the relocation. */
15829 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
15832 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15833 consistency with the behavior on 32-bit hosts. Remember value
15835 value
&= 0xffffffff;
15836 value
^= 0x80000000;
15837 value
-= 0x80000000;
15840 fixP
->fx_addnumber
= value
;
15842 /* Same treatment for fixP->fx_offset. */
15843 fixP
->fx_offset
&= 0xffffffff;
15844 fixP
->fx_offset
^= 0x80000000;
15845 fixP
->fx_offset
-= 0x80000000;
15847 switch (fixP
->fx_r_type
)
15849 case BFD_RELOC_NONE
:
15850 /* This will need to go in the object file. */
15854 case BFD_RELOC_ARM_IMMEDIATE
:
15855 /* We claim that this fixup has been processed here,
15856 even if in fact we generate an error because we do
15857 not have a reloc for it, so tc_gen_reloc will reject it. */
15861 && ! S_IS_DEFINED (fixP
->fx_addsy
))
15863 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15864 _("undefined symbol %s used as an immediate value"),
15865 S_GET_NAME (fixP
->fx_addsy
));
15869 newimm
= encode_arm_immediate (value
);
15870 temp
= md_chars_to_number (buf
, INSN_SIZE
);
15872 /* If the instruction will fail, see if we can fix things up by
15873 changing the opcode. */
15874 if (newimm
== (unsigned int) FAIL
15875 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
15877 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15878 _("invalid constant (%lx) after fixup"),
15879 (unsigned long) value
);
15883 newimm
|= (temp
& 0xfffff000);
15884 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
15887 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
15889 unsigned int highpart
= 0;
15890 unsigned int newinsn
= 0xe1a00000; /* nop. */
15892 newimm
= encode_arm_immediate (value
);
15893 temp
= md_chars_to_number (buf
, INSN_SIZE
);
15895 /* If the instruction will fail, see if we can fix things up by
15896 changing the opcode. */
15897 if (newimm
== (unsigned int) FAIL
15898 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
15900 /* No ? OK - try using two ADD instructions to generate
15902 newimm
= validate_immediate_twopart (value
, & highpart
);
15904 /* Yes - then make sure that the second instruction is
15906 if (newimm
!= (unsigned int) FAIL
)
15908 /* Still No ? Try using a negated value. */
15909 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
15910 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
15911 /* Otherwise - give up. */
15914 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15915 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
15920 /* Replace the first operand in the 2nd instruction (which
15921 is the PC) with the destination register. We have
15922 already added in the PC in the first instruction and we
15923 do not want to do it again. */
15924 newinsn
&= ~ 0xf0000;
15925 newinsn
|= ((newinsn
& 0x0f000) << 4);
15928 newimm
|= (temp
& 0xfffff000);
15929 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
15931 highpart
|= (newinsn
& 0xfffff000);
15932 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
15936 case BFD_RELOC_ARM_OFFSET_IMM
:
15937 if (!fixP
->fx_done
&& seg
->use_rela_p
)
15940 case BFD_RELOC_ARM_LITERAL
:
15946 if (validate_offset_imm (value
, 0) == FAIL
)
15948 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
15949 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15950 _("invalid literal constant: pool needs to be closer"));
15952 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15953 _("bad immediate value for offset (%ld)"),
15958 newval
= md_chars_to_number (buf
, INSN_SIZE
);
15959 newval
&= 0xff7ff000;
15960 newval
|= value
| (sign
? INDEX_UP
: 0);
15961 md_number_to_chars (buf
, newval
, INSN_SIZE
);
15964 case BFD_RELOC_ARM_OFFSET_IMM8
:
15965 case BFD_RELOC_ARM_HWLITERAL
:
15971 if (validate_offset_imm (value
, 1) == FAIL
)
15973 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
15974 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15975 _("invalid literal constant: pool needs to be closer"));
15977 as_bad (_("bad immediate value for half-word offset (%ld)"),
15982 newval
= md_chars_to_number (buf
, INSN_SIZE
);
15983 newval
&= 0xff7ff0f0;
15984 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
15985 md_number_to_chars (buf
, newval
, INSN_SIZE
);
15988 case BFD_RELOC_ARM_T32_OFFSET_U8
:
15989 if (value
< 0 || value
> 1020 || value
% 4 != 0)
15990 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15991 _("bad immediate value for offset (%ld)"), (long) value
);
15994 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
15996 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
15999 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
16000 /* This is a complicated relocation used for all varieties of Thumb32
16001 load/store instruction with immediate offset:
16003 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16004 *4, optional writeback(W)
16005 (doubleword load/store)
16007 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16008 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16009 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16010 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16011 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16013 Uppercase letters indicate bits that are already encoded at
16014 this point. Lowercase letters are our problem. For the
16015 second block of instructions, the secondary opcode nybble
16016 (bits 8..11) is present, and bit 23 is zero, even if this is
16017 a PC-relative operation. */
16018 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16020 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
16022 if ((newval
& 0xf0000000) == 0xe0000000)
16024 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16026 newval
|= (1 << 23);
16029 if (value
% 4 != 0)
16031 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16032 _("offset not a multiple of 4"));
16038 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16039 _("offset out of range"));
16044 else if ((newval
& 0x000f0000) == 0x000f0000)
16046 /* PC-relative, 12-bit offset. */
16048 newval
|= (1 << 23);
16053 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16054 _("offset out of range"));
16059 else if ((newval
& 0x00000100) == 0x00000100)
16061 /* Writeback: 8-bit, +/- offset. */
16063 newval
|= (1 << 9);
16068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16069 _("offset out of range"));
16074 else if ((newval
& 0x00000f00) == 0x00000e00)
16076 /* T-instruction: positive 8-bit offset. */
16077 if (value
< 0 || value
> 0xff)
16079 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16080 _("offset out of range"));
16088 /* Positive 12-bit or negative 8-bit offset. */
16092 newval
|= (1 << 23);
16102 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16103 _("offset out of range"));
16110 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
16111 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
16114 case BFD_RELOC_ARM_SHIFT_IMM
:
16115 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16116 if (((unsigned long) value
) > 32
16118 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
16120 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16121 _("shift expression is too large"));
16126 /* Shifts of zero must be done as lsl. */
16128 else if (value
== 32)
16130 newval
&= 0xfffff07f;
16131 newval
|= (value
& 0x1f) << 7;
16132 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16135 case BFD_RELOC_ARM_T32_IMMEDIATE
:
16136 case BFD_RELOC_ARM_T32_IMM12
:
16137 case BFD_RELOC_ARM_T32_ADD_PC12
:
16138 /* We claim that this fixup has been processed here,
16139 even if in fact we generate an error because we do
16140 not have a reloc for it, so tc_gen_reloc will reject it. */
16144 && ! S_IS_DEFINED (fixP
->fx_addsy
))
16146 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16147 _("undefined symbol %s used as an immediate value"),
16148 S_GET_NAME (fixP
->fx_addsy
));
16152 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16154 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
16156 /* FUTURE: Implement analogue of negate_data_op for T32. */
16157 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
)
16159 newimm
= encode_thumb32_immediate (value
);
16160 if (newimm
== (unsigned int) FAIL
)
16161 newimm
= thumb32_negate_data_op (&newval
, value
);
16165 /* 12 bit immediate for addw/subw. */
16169 newval
^= 0x00a00000;
16172 newimm
= (unsigned int) FAIL
;
16177 if (newimm
== (unsigned int)FAIL
)
16179 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16180 _("invalid constant (%lx) after fixup"),
16181 (unsigned long) value
);
16185 newval
|= (newimm
& 0x800) << 15;
16186 newval
|= (newimm
& 0x700) << 4;
16187 newval
|= (newimm
& 0x0ff);
16189 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
16190 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
16193 case BFD_RELOC_ARM_SMC
:
16194 if (((unsigned long) value
) > 0xffff)
16195 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16196 _("invalid smc expression"));
16197 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16198 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
16199 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16202 case BFD_RELOC_ARM_SWI
:
16203 if (fixP
->tc_fix_data
!= 0)
16205 if (((unsigned long) value
) > 0xff)
16206 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16207 _("invalid swi expression"));
16208 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16210 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16214 if (((unsigned long) value
) > 0x00ffffff)
16215 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16216 _("invalid swi expression"));
16217 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16219 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16223 case BFD_RELOC_ARM_MULTI
:
16224 if (((unsigned long) value
) > 0xffff)
16225 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16226 _("invalid expression in load/store multiple"));
16227 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
16228 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16232 case BFD_RELOC_ARM_PCREL_CALL
:
16233 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16234 if ((newval
& 0xf0000000) == 0xf0000000)
16238 goto arm_branch_common
;
16240 case BFD_RELOC_ARM_PCREL_JUMP
:
16241 case BFD_RELOC_ARM_PLT32
:
16243 case BFD_RELOC_ARM_PCREL_BRANCH
:
16245 goto arm_branch_common
;
16247 case BFD_RELOC_ARM_PCREL_BLX
:
16250 /* We are going to store value (shifted right by two) in the
16251 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16252 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16253 also be be clear. */
16255 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16256 _("misaligned branch destination"));
16257 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
16258 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
16259 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16260 _("branch out of range"));
16262 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16264 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16265 newval
|= (value
>> 2) & 0x00ffffff;
16266 /* Set the H bit on BLX instructions. */
16270 newval
|= 0x01000000;
16272 newval
&= ~0x01000000;
16274 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16278 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CZB */
16279 /* CZB can only branch forward. */
16281 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16282 _("branch out of range"));
16284 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16286 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16287 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
16288 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16292 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
16293 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
16294 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16295 _("branch out of range"));
16297 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16299 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16300 newval
|= (value
& 0x1ff) >> 1;
16301 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16305 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
16306 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
16307 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16308 _("branch out of range"));
16310 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16312 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16313 newval
|= (value
& 0xfff) >> 1;
16314 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16318 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16319 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
16320 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16321 _("conditional branch out of range"));
16323 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16326 addressT S
, J1
, J2
, lo
, hi
;
16328 S
= (value
& 0x00100000) >> 20;
16329 J2
= (value
& 0x00080000) >> 19;
16330 J1
= (value
& 0x00040000) >> 18;
16331 hi
= (value
& 0x0003f000) >> 12;
16332 lo
= (value
& 0x00000ffe) >> 1;
16334 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16335 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16336 newval
|= (S
<< 10) | hi
;
16337 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
16338 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16339 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16343 case BFD_RELOC_THUMB_PCREL_BLX
:
16344 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16345 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
16346 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16347 _("branch out of range"));
16349 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
16350 /* For a BLX instruction, make sure that the relocation is rounded up
16351 to a word boundary. This follows the semantics of the instruction
16352 which specifies that bit 1 of the target address will come from bit
16353 1 of the base address. */
16354 value
= (value
+ 1) & ~ 1;
16356 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16360 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16361 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16362 newval
|= (value
& 0x7fffff) >> 12;
16363 newval2
|= (value
& 0xfff) >> 1;
16364 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16365 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16369 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16370 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
16371 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16372 _("branch out of range"));
16374 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16377 addressT S
, I1
, I2
, lo
, hi
;
16379 S
= (value
& 0x01000000) >> 24;
16380 I1
= (value
& 0x00800000) >> 23;
16381 I2
= (value
& 0x00400000) >> 22;
16382 hi
= (value
& 0x003ff000) >> 12;
16383 lo
= (value
& 0x00000ffe) >> 1;
16388 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16389 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16390 newval
|= (S
<< 10) | hi
;
16391 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
16392 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16393 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16398 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16399 md_number_to_chars (buf
, value
, 1);
16403 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16404 md_number_to_chars (buf
, value
, 2);
16408 case BFD_RELOC_ARM_TLS_GD32
:
16409 case BFD_RELOC_ARM_TLS_LE32
:
16410 case BFD_RELOC_ARM_TLS_IE32
:
16411 case BFD_RELOC_ARM_TLS_LDM32
:
16412 case BFD_RELOC_ARM_TLS_LDO32
:
16413 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
16416 case BFD_RELOC_ARM_GOT32
:
16417 case BFD_RELOC_ARM_GOTOFF
:
16418 case BFD_RELOC_ARM_TARGET2
:
16419 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16420 md_number_to_chars (buf
, 0, 4);
16424 case BFD_RELOC_RVA
:
16426 case BFD_RELOC_ARM_TARGET1
:
16427 case BFD_RELOC_ARM_ROSEGREL32
:
16428 case BFD_RELOC_ARM_SBREL32
:
16429 case BFD_RELOC_32_PCREL
:
16430 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16431 md_number_to_chars (buf
, value
, 4);
16435 case BFD_RELOC_ARM_PREL31
:
16436 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16438 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
16439 if ((value
^ (value
>> 1)) & 0x40000000)
16441 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16442 _("rel31 relocation overflow"));
16444 newval
|= value
& 0x7fffffff;
16445 md_number_to_chars (buf
, newval
, 4);
16450 case BFD_RELOC_ARM_CP_OFF_IMM
:
16451 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
16452 if (value
< -1023 || value
> 1023 || (value
& 3))
16453 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16454 _("co-processor offset out of range"));
16459 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
16460 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
16461 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16463 newval
= get_thumb32_insn (buf
);
16464 newval
&= 0xff7fff00;
16465 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
16467 newval
&= ~WRITE_BACK
;
16468 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
16469 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
16470 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16472 put_thumb32_insn (buf
, newval
);
16475 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
16476 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
16477 if (value
< -255 || value
> 255)
16478 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16479 _("co-processor offset out of range"));
16480 goto cp_off_common
;
16482 case BFD_RELOC_ARM_THUMB_OFFSET
:
16483 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16484 /* Exactly what ranges, and where the offset is inserted depends
16485 on the type of instruction, we can establish this from the
16487 switch (newval
>> 12)
16489 case 4: /* PC load. */
16490 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16491 forced to zero for these loads; md_pcrel_from has already
16492 compensated for this. */
16494 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16495 _("invalid offset, target not word aligned (0x%08lX)"),
16496 (((unsigned long) fixP
->fx_frag
->fr_address
16497 + (unsigned long) fixP
->fx_where
) & ~3)
16498 + (unsigned long) value
);
16500 if (value
& ~0x3fc)
16501 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16502 _("invalid offset, value too big (0x%08lX)"),
16505 newval
|= value
>> 2;
16508 case 9: /* SP load/store. */
16509 if (value
& ~0x3fc)
16510 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16511 _("invalid offset, value too big (0x%08lX)"),
16513 newval
|= value
>> 2;
16516 case 6: /* Word load/store. */
16518 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16519 _("invalid offset, value too big (0x%08lX)"),
16521 newval
|= value
<< 4; /* 6 - 2. */
16524 case 7: /* Byte load/store. */
16526 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16527 _("invalid offset, value too big (0x%08lX)"),
16529 newval
|= value
<< 6;
16532 case 8: /* Halfword load/store. */
16534 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16535 _("invalid offset, value too big (0x%08lX)"),
16537 newval
|= value
<< 5; /* 6 - 1. */
16541 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16542 "Unable to process relocation for thumb opcode: %lx",
16543 (unsigned long) newval
);
16546 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16549 case BFD_RELOC_ARM_THUMB_ADD
:
16550 /* This is a complicated relocation, since we use it for all of
16551 the following immediate relocations:
16555 9bit ADD/SUB SP word-aligned
16556 10bit ADD PC/SP word-aligned
16558 The type of instruction being processed is encoded in the
16565 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16567 int rd
= (newval
>> 4) & 0xf;
16568 int rs
= newval
& 0xf;
16569 int subtract
= !!(newval
& 0x8000);
16571 /* Check for HI regs, only very restricted cases allowed:
16572 Adjusting SP, and using PC or SP to get an address. */
16573 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
16574 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
16575 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16576 _("invalid Hi register with immediate"));
16578 /* If value is negative, choose the opposite instruction. */
16582 subtract
= !subtract
;
16584 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16585 _("immediate value out of range"));
16590 if (value
& ~0x1fc)
16591 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16592 _("invalid immediate for stack address calculation"));
16593 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
16594 newval
|= value
>> 2;
16596 else if (rs
== REG_PC
|| rs
== REG_SP
)
16598 if (subtract
|| value
& ~0x3fc)
16599 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16600 _("invalid immediate for address calculation (value = 0x%08lX)"),
16601 (unsigned long) value
);
16602 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
16604 newval
|= value
>> 2;
16609 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16610 _("immediate value out of range"));
16611 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
16612 newval
|= (rd
<< 8) | value
;
16617 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16618 _("immediate value out of range"));
16619 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
16620 newval
|= rd
| (rs
<< 3) | (value
<< 6);
16623 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16626 case BFD_RELOC_ARM_THUMB_IMM
:
16627 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16628 if (value
< 0 || value
> 255)
16629 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16630 _("invalid immediate: %ld is too large"),
16633 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16636 case BFD_RELOC_ARM_THUMB_SHIFT
:
16637 /* 5bit shift value (0..32). LSL cannot take 32. */
16638 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
16639 temp
= newval
& 0xf800;
16640 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
16641 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16642 _("invalid shift value: %ld"), (long) value
);
16643 /* Shifts of zero must be encoded as LSL. */
16645 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
16646 /* Shifts of 32 are encoded as zero. */
16647 else if (value
== 32)
16649 newval
|= value
<< 6;
16650 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16653 case BFD_RELOC_VTABLE_INHERIT
:
16654 case BFD_RELOC_VTABLE_ENTRY
:
16658 case BFD_RELOC_UNUSED
:
16660 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16661 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
16665 /* Translate internal representation of relocation info to BFD target
16669 tc_gen_reloc (asection
*section
, fixS
*fixp
)
16672 bfd_reloc_code_real_type code
;
16674 reloc
= xmalloc (sizeof (arelent
));
16676 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
16677 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
16678 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
16680 if (fixp
->fx_pcrel
)
16682 if (section
->use_rela_p
)
16683 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
16685 fixp
->fx_offset
= reloc
->address
;
16687 reloc
->addend
= fixp
->fx_offset
;
16689 switch (fixp
->fx_r_type
)
16692 if (fixp
->fx_pcrel
)
16694 code
= BFD_RELOC_8_PCREL
;
16699 if (fixp
->fx_pcrel
)
16701 code
= BFD_RELOC_16_PCREL
;
16706 if (fixp
->fx_pcrel
)
16708 code
= BFD_RELOC_32_PCREL
;
16712 case BFD_RELOC_NONE
:
16713 case BFD_RELOC_ARM_PCREL_BRANCH
:
16714 case BFD_RELOC_ARM_PCREL_BLX
:
16715 case BFD_RELOC_RVA
:
16716 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
16717 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
16718 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
16719 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16720 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16721 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16722 case BFD_RELOC_THUMB_PCREL_BLX
:
16723 case BFD_RELOC_VTABLE_ENTRY
:
16724 case BFD_RELOC_VTABLE_INHERIT
:
16725 code
= fixp
->fx_r_type
;
16728 case BFD_RELOC_ARM_LITERAL
:
16729 case BFD_RELOC_ARM_HWLITERAL
:
16730 /* If this is called then the a literal has
16731 been referenced across a section boundary. */
16732 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16733 _("literal referenced across section boundary"));
16737 case BFD_RELOC_ARM_GOT32
:
16738 case BFD_RELOC_ARM_GOTOFF
:
16739 case BFD_RELOC_ARM_PLT32
:
16740 case BFD_RELOC_ARM_TARGET1
:
16741 case BFD_RELOC_ARM_ROSEGREL32
:
16742 case BFD_RELOC_ARM_SBREL32
:
16743 case BFD_RELOC_ARM_PREL31
:
16744 case BFD_RELOC_ARM_TARGET2
:
16745 case BFD_RELOC_ARM_TLS_LE32
:
16746 case BFD_RELOC_ARM_TLS_LDO32
:
16747 case BFD_RELOC_ARM_PCREL_CALL
:
16748 case BFD_RELOC_ARM_PCREL_JUMP
:
16749 code
= fixp
->fx_r_type
;
16752 case BFD_RELOC_ARM_TLS_GD32
:
16753 case BFD_RELOC_ARM_TLS_IE32
:
16754 case BFD_RELOC_ARM_TLS_LDM32
:
16755 /* BFD will include the symbol's address in the addend.
16756 But we don't want that, so subtract it out again here. */
16757 if (!S_IS_COMMON (fixp
->fx_addsy
))
16758 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
16759 code
= fixp
->fx_r_type
;
16763 case BFD_RELOC_ARM_IMMEDIATE
:
16764 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16765 _("internal relocation (type: IMMEDIATE) not fixed up"));
16768 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
16769 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16770 _("ADRL used for a symbol not defined in the same file"));
16773 case BFD_RELOC_ARM_OFFSET_IMM
:
16774 if (section
->use_rela_p
)
16776 code
= fixp
->fx_r_type
;
16780 if (fixp
->fx_addsy
!= NULL
16781 && !S_IS_DEFINED (fixp
->fx_addsy
)
16782 && S_IS_LOCAL (fixp
->fx_addsy
))
16784 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16785 _("undefined local label `%s'"),
16786 S_GET_NAME (fixp
->fx_addsy
));
16790 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16791 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
16798 switch (fixp
->fx_r_type
)
16800 case BFD_RELOC_NONE
: type
= "NONE"; break;
16801 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
16802 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
16803 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
16804 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
16805 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
16806 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
16807 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
16808 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
16809 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
16810 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
16811 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
16812 default: type
= _("<unknown>"); break;
16814 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16815 _("cannot represent %s relocation in this object file format"),
16822 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
16824 && fixp
->fx_addsy
== GOT_symbol
)
16826 code
= BFD_RELOC_ARM_GOTPC
;
16827 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
16831 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
16833 if (reloc
->howto
== NULL
)
16835 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16836 _("cannot represent %s relocation in this object file format"),
16837 bfd_get_reloc_code_name (code
));
16841 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16842 vtable entry to be used in the relocation's section offset. */
16843 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
16844 reloc
->address
= fixp
->fx_offset
;
16849 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16852 cons_fix_new_arm (fragS
* frag
,
16857 bfd_reloc_code_real_type type
;
16861 FIXME: @@ Should look at CPU word size. */
16865 type
= BFD_RELOC_8
;
16868 type
= BFD_RELOC_16
;
16872 type
= BFD_RELOC_32
;
16875 type
= BFD_RELOC_64
;
16879 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
16882 #if defined OBJ_COFF || defined OBJ_ELF
16884 arm_validate_fix (fixS
* fixP
)
16886 /* If the destination of the branch is a defined symbol which does not have
16887 the THUMB_FUNC attribute, then we must be calling a function which has
16888 the (interfacearm) attribute. We look for the Thumb entry point to that
16889 function and change the branch to refer to that function instead. */
16890 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
16891 && fixP
->fx_addsy
!= NULL
16892 && S_IS_DEFINED (fixP
->fx_addsy
)
16893 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
16895 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
16901 arm_force_relocation (struct fix
* fixp
)
16903 #if defined (OBJ_COFF) && defined (TE_PE)
16904 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
16908 /* Resolve these relocations even if the symbol is extern or weak. */
16909 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
16910 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
16911 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
16912 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
16913 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
16914 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
16917 return generic_force_reloc (fixp
);
16921 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
16922 local labels from being added to the output symbol table when they
16923 are used with the ADRL pseudo op. The ADRL relocation should always
16924 be resolved before the binbary is emitted, so it is safe to say that
16925 it is adjustable. */
16928 arm_fix_adjustable (fixS
* fixP
)
16930 if (fixP
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
)
16937 /* Relocations against Thumb function names must be left unadjusted,
16938 so that the linker can use this information to correctly set the
16939 bottom bit of their addresses. The MIPS version of this function
16940 also prevents relocations that are mips-16 specific, but I do not
16941 know why it does this.
16944 There is one other problem that ought to be addressed here, but
16945 which currently is not: Taking the address of a label (rather
16946 than a function) and then later jumping to that address. Such
16947 addresses also ought to have their bottom bit set (assuming that
16948 they reside in Thumb code), but at the moment they will not. */
16951 arm_fix_adjustable (fixS
* fixP
)
16953 if (fixP
->fx_addsy
== NULL
)
16956 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
16957 && fixP
->fx_subsy
== NULL
)
16960 /* We need the symbol name for the VTABLE entries. */
16961 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
16962 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
16965 /* Don't allow symbols to be discarded on GOT related relocs. */
16966 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
16967 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
16968 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
16969 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
16970 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
16971 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
16972 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
16973 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
16974 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
16981 elf32_arm_target_format (void)
16984 return (target_big_endian
16985 ? "elf32-bigarm-symbian"
16986 : "elf32-littlearm-symbian");
16987 #elif defined (TE_VXWORKS)
16988 return (target_big_endian
16989 ? "elf32-bigarm-vxworks"
16990 : "elf32-littlearm-vxworks");
16992 if (target_big_endian
)
16993 return "elf32-bigarm";
16995 return "elf32-littlearm";
17000 armelf_frob_symbol (symbolS
* symp
,
17003 elf_frob_symbol (symp
, puntp
);
17007 /* MD interface: Finalization. */
17009 /* A good place to do this, although this was probably not intended
17010 for this kind of use. We need to dump the literal pool before
17011 references are made to a null symbol pointer. */
17016 literal_pool
* pool
;
17018 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
17020 /* Put it at the end of the relevent section. */
17021 subseg_set (pool
->section
, pool
->sub_section
);
17023 arm_elf_change_section ();
17029 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17033 arm_adjust_symtab (void)
17038 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
17040 if (ARM_IS_THUMB (sym
))
17042 if (THUMB_IS_FUNC (sym
))
17044 /* Mark the symbol as a Thumb function. */
17045 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
17046 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
17047 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
17049 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
17050 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
17052 as_bad (_("%s: unexpected function type: %d"),
17053 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
17055 else switch (S_GET_STORAGE_CLASS (sym
))
17058 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
17061 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
17064 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
17072 if (ARM_IS_INTERWORK (sym
))
17073 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
17080 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
17082 if (ARM_IS_THUMB (sym
))
17084 elf_symbol_type
* elf_sym
;
17086 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
17087 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
17089 if (! bfd_is_arm_mapping_symbol_name (elf_sym
->symbol
.name
))
17091 /* If it's a .thumb_func, declare it as so,
17092 otherwise tag label as .code 16. */
17093 if (THUMB_IS_FUNC (sym
))
17094 elf_sym
->internal_elf_sym
.st_info
=
17095 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
17097 elf_sym
->internal_elf_sym
.st_info
=
17098 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
17105 /* MD interface: Initialization. */
17108 set_constant_flonums (void)
17112 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
17113 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
17123 if ( (arm_ops_hsh
= hash_new ()) == NULL
17124 || (arm_cond_hsh
= hash_new ()) == NULL
17125 || (arm_shift_hsh
= hash_new ()) == NULL
17126 || (arm_psr_hsh
= hash_new ()) == NULL
17127 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
17128 || (arm_reg_hsh
= hash_new ()) == NULL
17129 || (arm_reloc_hsh
= hash_new ()) == NULL
17130 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
17131 as_fatal (_("virtual memory exhausted"));
17133 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
17134 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
17135 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
17136 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
17137 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
17138 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
17139 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
17140 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
17141 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
17142 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
17143 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
17144 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
17146 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
17148 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
17149 (PTR
) (barrier_opt_names
+ i
));
17151 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
17152 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
17155 set_constant_flonums ();
17157 /* Set the cpu variant based on the command-line options. We prefer
17158 -mcpu= over -march= if both are set (as for GCC); and we prefer
17159 -mfpu= over any other way of setting the floating point unit.
17160 Use of legacy options with new options are faulted. */
17163 if (mcpu_cpu_opt
|| march_cpu_opt
)
17164 as_bad (_("use of old and new-style options to set CPU type"));
17166 mcpu_cpu_opt
= legacy_cpu
;
17168 else if (!mcpu_cpu_opt
)
17169 mcpu_cpu_opt
= march_cpu_opt
;
17174 as_bad (_("use of old and new-style options to set FPU type"));
17176 mfpu_opt
= legacy_fpu
;
17178 else if (!mfpu_opt
)
17180 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17181 /* Some environments specify a default FPU. If they don't, infer it
17182 from the processor. */
17184 mfpu_opt
= mcpu_fpu_opt
;
17186 mfpu_opt
= march_fpu_opt
;
17188 mfpu_opt
= &fpu_default
;
17195 mfpu_opt
= &fpu_default
;
17196 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
17197 mfpu_opt
= &fpu_arch_vfp_v2
;
17199 mfpu_opt
= &fpu_arch_fpa
;
17205 mcpu_cpu_opt
= &cpu_default
;
17206 selected_cpu
= cpu_default
;
17210 selected_cpu
= *mcpu_cpu_opt
;
17212 mcpu_cpu_opt
= &arm_arch_any
;
17215 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
17217 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
17219 #if defined OBJ_COFF || defined OBJ_ELF
17221 unsigned int flags
= 0;
17223 #if defined OBJ_ELF
17224 flags
= meabi_flags
;
17226 switch (meabi_flags
)
17228 case EF_ARM_EABI_UNKNOWN
:
17230 /* Set the flags in the private structure. */
17231 if (uses_apcs_26
) flags
|= F_APCS26
;
17232 if (support_interwork
) flags
|= F_INTERWORK
;
17233 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
17234 if (pic_code
) flags
|= F_PIC
;
17235 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
17236 flags
|= F_SOFT_FLOAT
;
17238 switch (mfloat_abi_opt
)
17240 case ARM_FLOAT_ABI_SOFT
:
17241 case ARM_FLOAT_ABI_SOFTFP
:
17242 flags
|= F_SOFT_FLOAT
;
17245 case ARM_FLOAT_ABI_HARD
:
17246 if (flags
& F_SOFT_FLOAT
)
17247 as_bad (_("hard-float conflicts with specified fpu"));
17251 /* Using pure-endian doubles (even if soft-float). */
17252 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
17253 flags
|= F_VFP_FLOAT
;
17255 #if defined OBJ_ELF
17256 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
17257 flags
|= EF_ARM_MAVERICK_FLOAT
;
17260 case EF_ARM_EABI_VER4
:
17261 case EF_ARM_EABI_VER5
:
17262 /* No additional flags to set. */
17269 bfd_set_private_flags (stdoutput
, flags
);
17271 /* We have run out flags in the COFF header to encode the
17272 status of ATPCS support, so instead we create a dummy,
17273 empty, debug section called .arm.atpcs. */
17278 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
17282 bfd_set_section_flags
17283 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
17284 bfd_set_section_size (stdoutput
, sec
, 0);
17285 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
17291 /* Record the CPU type as well. */
17292 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
17293 mach
= bfd_mach_arm_iWMMXt
;
17294 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
17295 mach
= bfd_mach_arm_XScale
;
17296 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
17297 mach
= bfd_mach_arm_ep9312
;
17298 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
17299 mach
= bfd_mach_arm_5TE
;
17300 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
17302 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17303 mach
= bfd_mach_arm_5T
;
17305 mach
= bfd_mach_arm_5
;
17307 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
17309 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17310 mach
= bfd_mach_arm_4T
;
17312 mach
= bfd_mach_arm_4
;
17314 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
17315 mach
= bfd_mach_arm_3M
;
17316 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
17317 mach
= bfd_mach_arm_3
;
17318 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
17319 mach
= bfd_mach_arm_2a
;
17320 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
17321 mach
= bfd_mach_arm_2
;
17323 mach
= bfd_mach_arm_unknown
;
17325 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
17328 /* Command line processing. */
17331 Invocation line includes a switch not recognized by the base assembler.
17332 See if it's a processor-specific option.
17334 This routine is somewhat complicated by the need for backwards
17335 compatibility (since older releases of gcc can't be changed).
17336 The new options try to make the interface as compatible as
17339 New options (supported) are:
17341 -mcpu=<cpu name> Assemble for selected processor
17342 -march=<architecture name> Assemble for selected architecture
17343 -mfpu=<fpu architecture> Assemble for selected FPU.
17344 -EB/-mbig-endian Big-endian
17345 -EL/-mlittle-endian Little-endian
17346 -k Generate PIC code
17347 -mthumb Start in Thumb mode
17348 -mthumb-interwork Code supports ARM/Thumb interworking
17350 For now we will also provide support for:
17352 -mapcs-32 32-bit Program counter
17353 -mapcs-26 26-bit Program counter
17354 -macps-float Floats passed in FP registers
17355 -mapcs-reentrant Reentrant code
17357 (sometime these will probably be replaced with -mapcs=<list of options>
17358 and -matpcs=<list of options>)
17360 The remaining options are only supported for back-wards compatibility.
17361 Cpu variants, the arm part is optional:
17362 -m[arm]1 Currently not supported.
17363 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17364 -m[arm]3 Arm 3 processor
17365 -m[arm]6[xx], Arm 6 processors
17366 -m[arm]7[xx][t][[d]m] Arm 7 processors
17367 -m[arm]8[10] Arm 8 processors
17368 -m[arm]9[20][tdmi] Arm 9 processors
17369 -mstrongarm[110[0]] StrongARM processors
17370 -mxscale XScale processors
17371 -m[arm]v[2345[t[e]]] Arm architectures
17372 -mall All (except the ARM1)
17374 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17375 -mfpe-old (No float load/store multiples)
17376 -mvfpxd VFP Single precision
17378 -mno-fpu Disable all floating point instructions
17380 The following CPU names are recognized:
17381 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17382 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17383 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17384 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17385 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17386 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17387 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17391 const char * md_shortopts
= "m:k";
17393 #ifdef ARM_BI_ENDIAN
17394 #define OPTION_EB (OPTION_MD_BASE + 0)
17395 #define OPTION_EL (OPTION_MD_BASE + 1)
17397 #if TARGET_BYTES_BIG_ENDIAN
17398 #define OPTION_EB (OPTION_MD_BASE + 0)
17400 #define OPTION_EL (OPTION_MD_BASE + 1)
17404 struct option md_longopts
[] =
17407 {"EB", no_argument
, NULL
, OPTION_EB
},
17410 {"EL", no_argument
, NULL
, OPTION_EL
},
17412 {NULL
, no_argument
, NULL
, 0}
17415 size_t md_longopts_size
= sizeof (md_longopts
);
17417 struct arm_option_table
17419 char *option
; /* Option name to match. */
17420 char *help
; /* Help information. */
17421 int *var
; /* Variable to change. */
17422 int value
; /* What to change it to. */
17423 char *deprecated
; /* If non-null, print this message. */
17426 struct arm_option_table arm_opts
[] =
17428 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
17429 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
17430 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17431 &support_interwork
, 1, NULL
},
17432 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
17433 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
17434 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
17436 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
17437 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
17438 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
17439 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
17442 /* These are recognized by the assembler, but have no affect on code. */
17443 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
17444 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
17445 {NULL
, NULL
, NULL
, 0, NULL
}
17448 struct arm_legacy_option_table
17450 char *option
; /* Option name to match. */
17451 const arm_feature_set
**var
; /* Variable to change. */
17452 const arm_feature_set value
; /* What to change it to. */
17453 char *deprecated
; /* If non-null, print this message. */
17456 const struct arm_legacy_option_table arm_legacy_opts
[] =
17458 /* DON'T add any new processors to this list -- we want the whole list
17459 to go away... Add them to the processors table instead. */
17460 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17461 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17462 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17463 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17464 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17465 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17466 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17467 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17468 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17469 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17470 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17471 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17472 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17473 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17474 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17475 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17476 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17477 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17478 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17479 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17480 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17481 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17482 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17483 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17484 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17485 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17486 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17487 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17488 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17489 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17490 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17491 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17492 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17493 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17494 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17495 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17496 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17497 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17498 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17499 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17500 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17501 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17502 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17503 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17504 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17505 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17506 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17507 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17508 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17509 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17510 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17511 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17512 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17513 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17514 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17515 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17516 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17517 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17518 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17519 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17520 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17521 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17522 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17523 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17524 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17525 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17526 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17527 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17528 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
17529 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
17530 N_("use -mcpu=strongarm110")},
17531 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
17532 N_("use -mcpu=strongarm1100")},
17533 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
17534 N_("use -mcpu=strongarm1110")},
17535 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
17536 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
17537 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
17539 /* Architecture variants -- don't add any more to this list either. */
17540 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17541 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17542 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17543 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17544 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17545 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17546 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17547 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17548 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17549 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17550 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17551 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17552 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17553 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17554 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17555 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17556 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17557 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17559 /* Floating point variants -- don't add any more to this list either. */
17560 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
17561 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
17562 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
17563 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
17564 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17566 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
17569 struct arm_cpu_option_table
17572 const arm_feature_set value
;
17573 /* For some CPUs we assume an FPU unless the user explicitly sets
17575 const arm_feature_set default_fpu
;
17576 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17578 const char *canonical_name
;
17581 /* This list should, at a minimum, contain all the cpu names
17582 recognized by GCC. */
17583 static const struct arm_cpu_option_table arm_cpus
[] =
17585 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
17586 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
17587 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
17588 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17589 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17590 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17591 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17592 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17593 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17594 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17595 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17596 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17597 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17598 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17599 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17600 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17601 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17602 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17603 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17604 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17605 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17606 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17607 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17608 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17609 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17610 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17611 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17612 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17613 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17614 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17615 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17616 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17617 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17618 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17619 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17620 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17621 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17622 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17623 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17624 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
17625 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17626 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17627 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17628 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17629 /* For V5 or later processors we default to using VFP; but the user
17630 should really set the FPU type explicitly. */
17631 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17632 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17633 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17634 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17635 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17636 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17637 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
17638 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17639 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17640 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
17641 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17642 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17643 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17644 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17645 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17646 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
17647 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17648 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17649 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17650 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
17651 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17652 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
17653 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
17654 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
17655 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
17656 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
17657 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
17658 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
17659 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
17660 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
17661 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
17662 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
17663 | FPU_NEON_EXT_V1
),
17665 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
17666 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
17667 /* ??? XSCALE is really an architecture. */
17668 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17669 /* ??? iwmmxt is not a processor. */
17670 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
17671 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17673 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
17674 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
17677 struct arm_arch_option_table
17680 const arm_feature_set value
;
17681 const arm_feature_set default_fpu
;
17684 /* This list should, at a minimum, contain all the architecture names
17685 recognized by GCC. */
17686 static const struct arm_arch_option_table arm_archs
[] =
17688 {"all", ARM_ANY
, FPU_ARCH_FPA
},
17689 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
17690 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
17691 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17692 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17693 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
17694 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
17695 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
17696 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
17697 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
17698 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
17699 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
17700 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
17701 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
17702 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
17703 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
17704 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
17705 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17706 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17707 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
17708 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
17709 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
17710 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
17711 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
17712 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
17713 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
17714 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
17715 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
17716 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
17717 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
17718 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
17719 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
17720 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
17723 /* ISA extensions in the co-processor space. */
17724 struct arm_option_cpu_value_table
17727 const arm_feature_set value
;
17730 static const struct arm_option_cpu_value_table arm_extensions
[] =
17732 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
17733 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
17734 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
17735 {NULL
, ARM_ARCH_NONE
}
17738 /* This list should, at a minimum, contain all the fpu names
17739 recognized by GCC. */
17740 static const struct arm_option_cpu_value_table arm_fpus
[] =
17742 {"softfpa", FPU_NONE
},
17743 {"fpe", FPU_ARCH_FPE
},
17744 {"fpe2", FPU_ARCH_FPE
},
17745 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
17746 {"fpa", FPU_ARCH_FPA
},
17747 {"fpa10", FPU_ARCH_FPA
},
17748 {"fpa11", FPU_ARCH_FPA
},
17749 {"arm7500fe", FPU_ARCH_FPA
},
17750 {"softvfp", FPU_ARCH_VFP
},
17751 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
17752 {"vfp", FPU_ARCH_VFP_V2
},
17753 {"vfp9", FPU_ARCH_VFP_V2
},
17754 {"vfp3", FPU_ARCH_VFP_V3
},
17755 {"vfp10", FPU_ARCH_VFP_V2
},
17756 {"vfp10-r0", FPU_ARCH_VFP_V1
},
17757 {"vfpxd", FPU_ARCH_VFP_V1xD
},
17758 {"arm1020t", FPU_ARCH_VFP_V1
},
17759 {"arm1020e", FPU_ARCH_VFP_V2
},
17760 {"arm1136jfs", FPU_ARCH_VFP_V2
},
17761 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
17762 {"maverick", FPU_ARCH_MAVERICK
},
17763 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
17764 {NULL
, ARM_ARCH_NONE
}
/* A table entry mapping an option name onto a plain integer value.
   (The value field holds e.g. an ABI enumeration or EABI flag word,
   as used by the arm_float_abis and arm_eabis tables below.)  */
struct arm_option_value_table
{
  char *name;
  int value;
};
17773 static const struct arm_option_value_table arm_float_abis
[] =
17775 {"hard", ARM_FLOAT_ABI_HARD
},
17776 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
17777 {"soft", ARM_FLOAT_ABI_SOFT
},
17782 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17783 static const struct arm_option_value_table arm_eabis
[] =
17785 {"gnu", EF_ARM_EABI_UNKNOWN
},
17786 {"4", EF_ARM_EABI_VER4
},
17787 {"5", EF_ARM_EABI_VER5
},
/* One "long" command-line option: matched as a prefix of the argument
   and decoded by a dedicated sub-option parsing function.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
17801 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
17803 arm_feature_set
*ext_set
= xmalloc (sizeof (arm_feature_set
));
17805 /* Copy the feature set, so that we can modify it. */
17806 *ext_set
= **opt_p
;
17809 while (str
!= NULL
&& *str
!= 0)
17811 const struct arm_option_cpu_value_table
* opt
;
17817 as_bad (_("invalid architectural extension"));
17822 ext
= strchr (str
, '+');
17825 optlen
= ext
- str
;
17827 optlen
= strlen (str
);
17831 as_bad (_("missing architectural extension"));
17835 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
17836 if (strncmp (opt
->name
, str
, optlen
) == 0)
17838 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
17842 if (opt
->name
== NULL
)
17844 as_bad (_("unknown architectural extnsion `%s'"), str
);
17855 arm_parse_cpu (char * str
)
17857 const struct arm_cpu_option_table
* opt
;
17858 char * ext
= strchr (str
, '+');
17862 optlen
= ext
- str
;
17864 optlen
= strlen (str
);
17868 as_bad (_("missing cpu name `%s'"), str
);
17872 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
17873 if (strncmp (opt
->name
, str
, optlen
) == 0)
17875 mcpu_cpu_opt
= &opt
->value
;
17876 mcpu_fpu_opt
= &opt
->default_fpu
;
17877 if (opt
->canonical_name
)
17878 strcpy(selected_cpu_name
, opt
->canonical_name
);
17882 for (i
= 0; i
< optlen
; i
++)
17883 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
17884 selected_cpu_name
[i
] = 0;
17888 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
17893 as_bad (_("unknown cpu `%s'"), str
);
17898 arm_parse_arch (char * str
)
17900 const struct arm_arch_option_table
*opt
;
17901 char *ext
= strchr (str
, '+');
17905 optlen
= ext
- str
;
17907 optlen
= strlen (str
);
17911 as_bad (_("missing architecture name `%s'"), str
);
17915 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
17916 if (streq (opt
->name
, str
))
17918 march_cpu_opt
= &opt
->value
;
17919 march_fpu_opt
= &opt
->default_fpu
;
17920 strcpy(selected_cpu_name
, opt
->name
);
17923 return arm_parse_extension (ext
, &march_cpu_opt
);
17928 as_bad (_("unknown architecture `%s'\n"), str
);
17933 arm_parse_fpu (char * str
)
17935 const struct arm_option_cpu_value_table
* opt
;
17937 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
17938 if (streq (opt
->name
, str
))
17940 mfpu_opt
= &opt
->value
;
17944 as_bad (_("unknown floating point format `%s'\n"), str
);
17949 arm_parse_float_abi (char * str
)
17951 const struct arm_option_value_table
* opt
;
17953 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
17954 if (streq (opt
->name
, str
))
17956 mfloat_abi_opt
= opt
->value
;
17960 as_bad (_("unknown floating point abi `%s'\n"), str
);
17966 arm_parse_eabi (char * str
)
17968 const struct arm_option_value_table
*opt
;
17970 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
17971 if (streq (opt
->name
, str
))
17973 meabi_flags
= opt
->value
;
17976 as_bad (_("unknown EABI `%s'\n"), str
);
17981 struct arm_long_option_table arm_long_opts
[] =
17983 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
17984 arm_parse_cpu
, NULL
},
17985 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
17986 arm_parse_arch
, NULL
},
17987 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
17988 arm_parse_fpu
, NULL
},
17989 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
17990 arm_parse_float_abi
, NULL
},
17992 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
17993 arm_parse_eabi
, NULL
},
17995 {NULL
, NULL
, 0, NULL
}
17999 md_parse_option (int c
, char * arg
)
18001 struct arm_option_table
*opt
;
18002 const struct arm_legacy_option_table
*fopt
;
18003 struct arm_long_option_table
*lopt
;
18009 target_big_endian
= 1;
18015 target_big_endian
= 0;
18020 /* Listing option. Just ignore these, we don't support additional
18025 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18027 if (c
== opt
->option
[0]
18028 && ((arg
== NULL
&& opt
->option
[1] == 0)
18029 || streq (arg
, opt
->option
+ 1)))
18031 #if WARN_DEPRECATED
18032 /* If the option is deprecated, tell the user. */
18033 if (opt
->deprecated
!= NULL
)
18034 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18035 arg
? arg
: "", _(opt
->deprecated
));
18038 if (opt
->var
!= NULL
)
18039 *opt
->var
= opt
->value
;
18045 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
18047 if (c
== fopt
->option
[0]
18048 && ((arg
== NULL
&& fopt
->option
[1] == 0)
18049 || streq (arg
, fopt
->option
+ 1)))
18051 #if WARN_DEPRECATED
18052 /* If the option is deprecated, tell the user. */
18053 if (fopt
->deprecated
!= NULL
)
18054 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18055 arg
? arg
: "", _(fopt
->deprecated
));
18058 if (fopt
->var
!= NULL
)
18059 *fopt
->var
= &fopt
->value
;
18065 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18067 /* These options are expected to have an argument. */
18068 if (c
== lopt
->option
[0]
18070 && strncmp (arg
, lopt
->option
+ 1,
18071 strlen (lopt
->option
+ 1)) == 0)
18073 #if WARN_DEPRECATED
18074 /* If the option is deprecated, tell the user. */
18075 if (lopt
->deprecated
!= NULL
)
18076 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
18077 _(lopt
->deprecated
));
18080 /* Call the sup-option parser. */
18081 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
18092 md_show_usage (FILE * fp
)
18094 struct arm_option_table
*opt
;
18095 struct arm_long_option_table
*lopt
;
18097 fprintf (fp
, _(" ARM-specific assembler options:\n"));
18099 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18100 if (opt
->help
!= NULL
)
18101 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
18103 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18104 if (lopt
->help
!= NULL
)
18105 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
18109 -EB assemble code for a big-endian cpu\n"));
18114 -EL assemble code for a little-endian cpu\n"));
18123 arm_feature_set flags
;
18124 } cpu_arch_ver_table
;
18126 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
18127 least features first. */
18128 static const cpu_arch_ver_table cpu_arch_ver
[] =
18133 {4, ARM_ARCH_V5TE
},
18134 {5, ARM_ARCH_V5TEJ
},
18138 {9, ARM_ARCH_V6T2
},
18139 {10, ARM_ARCH_V7A
},
18140 {10, ARM_ARCH_V7R
},
18141 {10, ARM_ARCH_V7M
},
18145 /* Set the public EABI object attributes. */
18147 aeabi_set_public_attributes (void)
18150 arm_feature_set flags
;
18151 arm_feature_set tmp
;
18152 const cpu_arch_ver_table
*p
;
18154 /* Choose the architecture based on the capabilities of the requested cpu
18155 (if any) and/or the instructions actually used. */
18156 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
18157 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
18158 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
18162 for (p
= cpu_arch_ver
; p
->val
; p
++)
18164 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
18167 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
18171 /* Tag_CPU_name. */
18172 if (selected_cpu_name
[0])
18176 p
= selected_cpu_name
;
18177 if (strncmp(p
, "armv", 4) == 0)
18182 for (i
= 0; p
[i
]; i
++)
18183 p
[i
] = TOUPPER (p
[i
]);
18185 elf32_arm_add_eabi_attr_string (stdoutput
, 5, p
);
18187 /* Tag_CPU_arch. */
18188 elf32_arm_add_eabi_attr_int (stdoutput
, 6, arch
);
18189 /* Tag_CPU_arch_profile. */
18190 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
18191 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'A');
18192 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
18193 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'R');
18194 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
))
18195 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'M');
18196 /* Tag_ARM_ISA_use. */
18197 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_full
))
18198 elf32_arm_add_eabi_attr_int (stdoutput
, 8, 1);
18199 /* Tag_THUMB_ISA_use. */
18200 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_full
))
18201 elf32_arm_add_eabi_attr_int (stdoutput
, 9,
18202 ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
) ? 2 : 1);
18203 /* Tag_VFP_arch. */
18204 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v3
)
18205 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v3
))
18206 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 3);
18207 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v2
)
18208 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v2
))
18209 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 2);
18210 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1
)
18211 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1
)
18212 || ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1xd
)
18213 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1xd
))
18214 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 1);
18215 /* Tag_WMMX_arch. */
18216 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_cext_iwmmxt
)
18217 || ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_cext_iwmmxt
))
18218 elf32_arm_add_eabi_attr_int (stdoutput
, 11, 1);
18219 /* Tag_NEON_arch. */
18220 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_neon_ext_v1
)
18221 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_neon_ext_v1
))
18222 elf32_arm_add_eabi_attr_int (stdoutput
, 12, 1);
18225 /* Add the .ARM.attributes section. */
18234 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
18237 aeabi_set_public_attributes ();
18238 size
= elf32_arm_eabi_attr_size (stdoutput
);
18239 s
= subseg_new (".ARM.attributes", 0);
18240 bfd_set_section_flags (stdoutput
, s
, SEC_READONLY
| SEC_DATA
);
18241 addr
= frag_now_fix ();
18242 p
= frag_more (size
);
18243 elf32_arm_set_eabi_attr_contents (stdoutput
, (bfd_byte
*)p
, size
);
18245 #endif /* OBJ_ELF */
18248 /* Parse a .cpu directive. */
18251 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
18253 const struct arm_cpu_option_table
*opt
;
18257 name
= input_line_pointer
;
18258 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18259 input_line_pointer
++;
18260 saved_char
= *input_line_pointer
;
18261 *input_line_pointer
= 0;
18263 /* Skip the first "all" entry. */
18264 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
18265 if (streq (opt
->name
, name
))
18267 mcpu_cpu_opt
= &opt
->value
;
18268 selected_cpu
= opt
->value
;
18269 if (opt
->canonical_name
)
18270 strcpy(selected_cpu_name
, opt
->canonical_name
);
18274 for (i
= 0; opt
->name
[i
]; i
++)
18275 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
18276 selected_cpu_name
[i
] = 0;
18278 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18279 *input_line_pointer
= saved_char
;
18280 demand_empty_rest_of_line ();
18283 as_bad (_("unknown cpu `%s'"), name
);
18284 *input_line_pointer
= saved_char
;
18285 ignore_rest_of_line ();
18289 /* Parse a .arch directive. */
18292 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
18294 const struct arm_arch_option_table
*opt
;
18298 name
= input_line_pointer
;
18299 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18300 input_line_pointer
++;
18301 saved_char
= *input_line_pointer
;
18302 *input_line_pointer
= 0;
18304 /* Skip the first "all" entry. */
18305 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
18306 if (streq (opt
->name
, name
))
18308 mcpu_cpu_opt
= &opt
->value
;
18309 selected_cpu
= opt
->value
;
18310 strcpy(selected_cpu_name
, opt
->name
);
18311 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18312 *input_line_pointer
= saved_char
;
18313 demand_empty_rest_of_line ();
18317 as_bad (_("unknown architecture `%s'\n"), name
);
18318 *input_line_pointer
= saved_char
;
18319 ignore_rest_of_line ();
18323 /* Parse a .fpu directive. */
18326 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
18328 const struct arm_option_cpu_value_table
*opt
;
18332 name
= input_line_pointer
;
18333 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18334 input_line_pointer
++;
18335 saved_char
= *input_line_pointer
;
18336 *input_line_pointer
= 0;
18338 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
18339 if (streq (opt
->name
, name
))
18341 mfpu_opt
= &opt
->value
;
18342 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18343 *input_line_pointer
= saved_char
;
18344 demand_empty_rest_of_line ();
18348 as_bad (_("unknown floating point format `%s'\n"), name
);
18349 *input_line_pointer
= saved_char
;
18350 ignore_rest_of_line ();