1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2021 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
52 #define DEFAULT_ARCH "i386"
57 #define INLINE __inline__
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
79 /* we define the syntax here (modulo base,index,scale syntax) */
80 #define REGISTER_PREFIX '%'
81 #define IMMEDIATE_PREFIX '$'
82 #define ABSOLUTE_PREFIX '*'
84 /* these are the instruction mnemonic suffixes in AT&T syntax or
85 memory operand size in Intel syntax. */
86 #define WORD_MNEM_SUFFIX 'w'
87 #define BYTE_MNEM_SUFFIX 'b'
88 #define SHORT_MNEM_SUFFIX 's'
89 #define LONG_MNEM_SUFFIX 'l'
90 #define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
95 #define END_OF_INSN '\0'
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
109 const insn_template
*start
;
110 const insn_template
*end
;
114 /* 386 operand encoding bytes: see 386 book for details of this. */
117 unsigned int regmem
; /* codes register or memory operand */
118 unsigned int reg
; /* codes register operand (or extended opcode) */
119 unsigned int mode
; /* how to interpret regmem & reg */
123 /* x86-64 extension prefix. */
124 typedef int rex_byte
;
126 /* 386 opcode byte to code indirect addressing. */
135 /* x86 arch names, types and features */
138 const char *name
; /* arch name */
139 unsigned int len
; /* arch string length */
140 enum processor_type type
; /* arch type */
141 i386_cpu_flags flags
; /* cpu feature flags */
142 unsigned int skip
; /* show_arch should skip this. */
146 /* Used to turn off indicated flags. */
149 const char *name
; /* arch name */
150 unsigned int len
; /* arch string length */
151 i386_cpu_flags flags
; /* cpu feature flags */
/* Forward declarations for static helpers defined later in this file.
   Bodies are not visible in this chunk; the int parameter on the set_*
   handlers is presumably the argument from the pseudo-op/directive
   table — TODO confirm against the definitions.  */
static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
static void pe_directive_secrel (int);
static void signed_cons (int);
/* NOTE(review): name suggests this formats an invalid character C for
   diagnostics — confirm in the definition.  */
static char *output_invalid (int c);
168 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
170 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
/* Forward declarations: operand and instruction parsing helpers
   (AT&T and Intel syntax variants), defined later in this file.  */
static int i386_att_operand (char *);
static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (char *, char **);
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
static void swap_2_operands (unsigned int, unsigned int);
static enum flag_code i386_addressing_mode (void);
static void optimize_imm (void);
static void optimize_disp (void);
static const insn_template *match_template (char);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
static const reg_entry *build_modrm_byte (void);
/* Forward declarations: instruction emission helpers.  */
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
/* Handler for the .bss directive.  */
static void s_bss (int);
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used
;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used
;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
211 static const char *default_arch
= DEFAULT_ARCH
;
213 /* parse_register() returns this when a register alias cannot be used. */
214 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
215 { Dw2Inval
, Dw2Inval
} };
217 static const reg_entry
*reg_eax
;
218 static const reg_entry
*reg_ds
;
219 static const reg_entry
*reg_es
;
220 static const reg_entry
*reg_ss
;
221 static const reg_entry
*reg_st0
;
222 static const reg_entry
*reg_k0
;
227 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
228 unsigned char bytes
[4];
230 /* Destination or source register specifier. */
231 const reg_entry
*register_specifier
;
234 /* 'md_assemble ()' gathers together information and puts it into a
241 const reg_entry
*regs
;
246 operand_size_mismatch
,
247 operand_type_mismatch
,
248 register_type_mismatch
,
249 number_of_operands_mismatch
,
250 invalid_instruction_suffix
,
252 unsupported_with_intel_mnemonic
,
256 invalid_vsib_address
,
257 invalid_vector_register_set
,
258 invalid_tmm_register_set
,
259 unsupported_vector_index_register
,
260 unsupported_broadcast
,
263 mask_not_on_destination
,
266 rc_sae_operand_not_last_imm
,
267 invalid_register_operand
,
272 /* TM holds the template for the insn were currently assembling. */
275 /* SUFFIX holds the instruction size suffix for byte, word, dword
276 or qword, if given. */
279 /* OPCODE_LENGTH holds the number of base opcode bytes. */
280 unsigned char opcode_length
;
282 /* OPERANDS gives the number of given operands. */
283 unsigned int operands
;
285 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
286 of given register, displacement, memory operands and immediate
288 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
290 /* TYPES [i] is the type (see above #defines) which tells us how to
291 use OP[i] for the corresponding operand. */
292 i386_operand_type types
[MAX_OPERANDS
];
294 /* Displacement expression, immediate expression, or register for each
296 union i386_op op
[MAX_OPERANDS
];
298 /* Flags for operands. */
299 unsigned int flags
[MAX_OPERANDS
];
300 #define Operand_PCrel 1
301 #define Operand_Mem 2
303 /* Relocation type for operand */
304 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
306 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
307 the base index byte below. */
308 const reg_entry
*base_reg
;
309 const reg_entry
*index_reg
;
310 unsigned int log2_scale_factor
;
312 /* SEG gives the seg_entries of this insn. They are zero unless
313 explicit segment overrides are given. */
314 const reg_entry
*seg
[2];
316 /* Copied first memory operand string, for re-checking. */
319 /* PREFIX holds all the given prefix opcodes (usually null).
320 PREFIXES is the number of prefix opcodes. */
321 unsigned int prefixes
;
322 unsigned char prefix
[MAX_PREFIXES
];
324 /* Register is in low 3 bits of opcode. */
327 /* The operand to a branch insn indicates an absolute branch. */
330 /* Extended states. */
338 xstate_ymm
= 1 << 2 | xstate_xmm
,
340 xstate_zmm
= 1 << 3 | xstate_ymm
,
343 /* Use MASK state. */
347 /* Has GOTPC or TLS relocation. */
348 bool has_gotpc_tls_reloc
;
350 /* RM and SIB are the modrm byte and the sib byte where the
351 addressing modes of this insn are encoded. */
358 /* Masking attributes.
360 The struct describes masking, applied to OPERAND in the instruction.
361 REG is a pointer to the corresponding mask register. ZEROING tells
362 whether merging or zeroing mask is used. */
363 struct Mask_Operation
365 const reg_entry
*reg
;
366 unsigned int zeroing
;
367 /* The operand where this operation is associated. */
368 unsigned int operand
;
371 /* Rounding control and SAE attributes. */
384 unsigned int operand
;
387 /* Broadcasting attributes.
389 The struct describes broadcasting, applied to OPERAND. TYPE
390 expresses the broadcast factor. */
391 struct Broadcast_Operation
393 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
396 /* Index of broadcasted operand. */
397 unsigned int operand
;
399 /* Number of bytes to broadcast. */
403 /* Compressed disp8*N attribute. */
404 unsigned int memshift
;
406 /* Prefer load or store in encoding. */
409 dir_encoding_default
= 0,
415 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
418 disp_encoding_default
= 0,
424 /* Prefer the REX byte in encoding. */
427 /* Disable instruction size optimization. */
430 /* How to encode vector instructions. */
433 vex_encoding_default
= 0,
441 const char *rep_prefix
;
444 const char *hle_prefix
;
446 /* Have BND prefix. */
447 const char *bnd_prefix
;
449 /* Have NOTRACK prefix. */
450 const char *notrack_prefix
;
453 enum i386_error error
;
456 typedef struct _i386_insn i386_insn
;
458 /* Link RC type with corresponding string, that'll be looked for in
467 static const struct RC_name RC_NamesTable
[] =
469 { rne
, STRING_COMMA_LEN ("rn-sae") },
470 { rd
, STRING_COMMA_LEN ("rd-sae") },
471 { ru
, STRING_COMMA_LEN ("ru-sae") },
472 { rz
, STRING_COMMA_LEN ("rz-sae") },
473 { saeonly
, STRING_COMMA_LEN ("sae") },
476 /* List of chars besides those in app.c:symbol_chars that can start an
477 operand. Used to prevent the scrubber eating vital white-space. */
478 const char extra_symbol_chars
[] = "*%-([{}"
487 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
488 && !defined (TE_GNU) \
489 && !defined (TE_LINUX) \
490 && !defined (TE_FreeBSD) \
491 && !defined (TE_DragonFly) \
492 && !defined (TE_NetBSD))
493 /* This array holds the chars that always start a comment. If the
494 pre-processor is disabled, these aren't very useful. The option
495 --divide will remove '/' from this list. */
496 const char *i386_comment_chars
= "#/";
497 #define SVR4_COMMENT_CHARS 1
498 #define PREFIX_SEPARATOR '\\'
501 const char *i386_comment_chars
= "#";
502 #define PREFIX_SEPARATOR '/'
505 /* This array holds the chars that only start a comment at the beginning of
506 a line. If the line seems to have the form '# 123 filename'
507 .line and .file directives will appear in the pre-processed output.
508 Note that input_file.c hand checks for '#' at the beginning of the
509 first line of the input file. This is because the compiler outputs
510 #NO_APP at the beginning of its output.
511 Also note that comments started like this one will always work if
512 '/' isn't otherwise defined. */
513 const char line_comment_chars
[] = "#/";
515 const char line_separator_chars
[] = ";";
517 /* Chars that can be used to separate mant from exp in floating point
519 const char EXP_CHARS
[] = "eE";
521 /* Chars that mean this number is a floating point constant
524 const char FLT_CHARS
[] = "fFdDxX";
/* Tables for lexical analysis.  Each is a 256-entry membership table
   indexed by character value (filled in elsewhere in this file).  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  The argument is parenthesized *inside* the cast so
   that a compound argument such as `c + 1' is converted as a whole;
   with the unparenthesized form the cast would bind only to the first
   operand ((unsigned char) c + 1), which can index out of bounds.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
541 /* All non-digit non-letter characters that may occur in an operand. */
542 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
544 /* md_assemble() always leaves the strings it's passed unaltered. To
545 effect this we maintain a stack of saved characters that we've smashed
546 with '\0's (indicating end of strings for various sub-fields of the
547 assembler instruction). */
/* Stack of characters that were overwritten with '\0' while parsing
   sub-fields of the current instruction; RESTORE_END_STRING pops them
   back so the caller's string is left unaltered.  */
static char save_stack[32];
/* Next free slot in save_stack (grows upward).  NOTE(review): assumed
   to be (re)pointed at save_stack before each instruction is parsed —
   the initialization is not visible in this chunk; confirm.  No
   overflow check is performed here, so pushes must stay within 32.  */
static char *save_stack_p;
/* Terminate the string at S with '\0', saving the clobbered byte.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE on S.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
555 /* The instruction we're assembling. */
558 /* Possible templates for current insn. */
559 static const templates
*current_templates
;
561 /* Per instruction expressionS buffers: max displacements & immediates. */
562 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
563 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
565 /* Current operand we are working on. */
566 static int this_operand
= -1;
568 /* We support four different modes. FLAG_CODE variable is used to distinguish
576 static enum flag_code flag_code
;
577 static unsigned int object_64bit
;
578 static unsigned int disallow_64bit_reloc
;
579 static int use_rela_relocations
= 0;
580 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
581 static const char *tls_get_addr
;
583 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
584 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
585 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
587 /* The ELF ABI to use. */
595 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
598 #if defined (TE_PE) || defined (TE_PEP)
599 /* Use big object file format. */
600 static int use_big_obj
= 0;
603 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
604 /* 1 if generating code for a shared library. */
605 static int shared
= 0;
608 /* 1 for intel syntax,
610 static int intel_syntax
= 0;
612 static enum x86_64_isa
614 amd64
= 1, /* AMD64 ISA. */
615 intel64
/* Intel64 ISA. */
618 /* 1 for intel mnemonic,
619 0 if att mnemonic. */
620 static int intel_mnemonic
= !SYSV386_COMPAT
;
622 /* 1 if pseudo registers are permitted. */
623 static int allow_pseudo_reg
= 0;
625 /* 1 if register prefix % not required. */
626 static int allow_naked_reg
= 0;
628 /* 1 if the assembler should add BND prefix for all control-transferring
629 instructions supporting it, even if this prefix wasn't specified
631 static int add_bnd_prefix
= 0;
633 /* 1 if pseudo index register, eiz/riz, is allowed . */
634 static int allow_index_reg
= 0;
636 /* 1 if the assembler should ignore LOCK prefix, even if it was
637 specified explicitly. */
638 static int omit_lock_prefix
= 0;
640 /* 1 if the assembler should encode lfence, mfence, and sfence as
641 "lock addl $0, (%{re}sp)". */
642 static int avoid_fence
= 0;
644 /* 1 if lfence should be inserted after every load. */
645 static int lfence_after_load
= 0;
647 /* Non-zero if lfence should be inserted before indirect branch. */
648 static enum lfence_before_indirect_branch_kind
650 lfence_branch_none
= 0,
651 lfence_branch_register
,
652 lfence_branch_memory
,
655 lfence_before_indirect_branch
;
657 /* Non-zero if lfence should be inserted before ret. */
658 static enum lfence_before_ret_kind
660 lfence_before_ret_none
= 0,
661 lfence_before_ret_not
,
662 lfence_before_ret_or
,
663 lfence_before_ret_shl
667 /* Types of previous instruction is .byte or prefix. */
682 /* 1 if the assembler should generate relax relocations. */
684 static int generate_relax_relocations
685 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
687 static enum check_kind
693 sse_check
, operand_check
= check_warning
;
695 /* Non-zero if branches should be aligned within power of 2 boundary. */
696 static int align_branch_power
= 0;
698 /* Types of branches to align. */
699 enum align_branch_kind
701 align_branch_none
= 0,
702 align_branch_jcc
= 1,
703 align_branch_fused
= 2,
704 align_branch_jmp
= 3,
705 align_branch_call
= 4,
706 align_branch_indirect
= 5,
710 /* Type bits of branches to align. */
711 enum align_branch_bit
713 align_branch_jcc_bit
= 1 << align_branch_jcc
,
714 align_branch_fused_bit
= 1 << align_branch_fused
,
715 align_branch_jmp_bit
= 1 << align_branch_jmp
,
716 align_branch_call_bit
= 1 << align_branch_call
,
717 align_branch_indirect_bit
= 1 << align_branch_indirect
,
718 align_branch_ret_bit
= 1 << align_branch_ret
721 static unsigned int align_branch
= (align_branch_jcc_bit
722 | align_branch_fused_bit
723 | align_branch_jmp_bit
);
725 /* Types of condition jump used by macro-fusion. */
728 mf_jcc_jo
= 0, /* base opcode 0x70 */
729 mf_jcc_jc
, /* base opcode 0x72 */
730 mf_jcc_je
, /* base opcode 0x74 */
731 mf_jcc_jna
, /* base opcode 0x76 */
732 mf_jcc_js
, /* base opcode 0x78 */
733 mf_jcc_jp
, /* base opcode 0x7a */
734 mf_jcc_jl
, /* base opcode 0x7c */
735 mf_jcc_jle
, /* base opcode 0x7e */
738 /* Types of compare flag-modifying instructions used by macro-fusion. */
741 mf_cmp_test_and
, /* test/cmp */
742 mf_cmp_alu_cmp
, /* add/sub/cmp */
743 mf_cmp_incdec
/* inc/dec */
746 /* The maximum padding size for fused jcc. CMP like instruction can
747 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
749 #define MAX_FUSED_JCC_PADDING_SIZE 20
751 /* The maximum number of prefixes added for an instruction. */
752 static unsigned int align_branch_prefix_size
= 5;
755 1. Clear the REX_W bit with register operand if possible.
756 2. Above plus use 128bit vector instruction to clear the full vector
759 static int optimize
= 0;
762 1. Clear the REX_W bit with register operand if possible.
763 2. Above plus use 128bit vector instruction to clear the full vector
765 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
768 static int optimize_for_space
= 0;
770 /* Register prefix used for error message. */
771 static const char *register_prefix
= "%";
773 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
774 leave, push, and pop instructions so that gcc has the same stack
775 frame as in 32 bit mode. */
776 static char stackop_size
= '\0';
778 /* Non-zero to optimize code alignment. */
779 int optimize_align_code
= 1;
781 /* Non-zero to quieten some warnings. */
782 static int quiet_warnings
= 0;
785 static const char *cpu_arch_name
= NULL
;
786 static char *cpu_sub_arch_name
= NULL
;
788 /* CPU feature flags. */
789 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
791 /* If we have selected a cpu we are generating instructions for. */
792 static int cpu_arch_tune_set
= 0;
794 /* Cpu we are generating instructions for. */
795 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
797 /* CPU feature flags of cpu we are generating instructions for. */
798 static i386_cpu_flags cpu_arch_tune_flags
;
800 /* CPU instruction set architecture used. */
801 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
803 /* CPU feature flags of instruction set architecture used. */
804 i386_cpu_flags cpu_arch_isa_flags
;
806 /* If set, conditional jumps are not automatically promoted to handle
807 larger than a byte offset. */
808 static unsigned int no_cond_jump_promotion
= 0;
810 /* Encode SSE instructions with VEX prefix. */
811 static unsigned int sse2avx
;
813 /* Encode scalar AVX instructions with specific vector length. */
820 /* Encode VEX WIG instructions with specific vex.w. */
827 /* Encode scalar EVEX LIG instructions with specific vector length. */
835 /* Encode EVEX WIG instructions with specific evex.w. */
842 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
843 static enum rc_type evexrcig
= rne
;
845 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
846 static symbolS
*GOT_symbol
;
848 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
849 unsigned int x86_dwarf2_return_column
;
851 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
852 int x86_cie_data_alignment
;
854 /* Interface to relax_segment.
855 There are 3 major relax states for 386 jump insns because the
856 different types of jumps add different sizes to frags when we're
857 figuring out what sort of jump to choose to reach a given label.
859 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
860 branches which are handled by md_estimate_size_before_relax() and
861 i386_generic_table_relax_frag(). */
864 #define UNCOND_JUMP 0
866 #define COND_JUMP86 2
867 #define BRANCH_PADDING 3
868 #define BRANCH_PREFIX 4
869 #define FUSED_JCC_PADDING 5
874 #define SMALL16 (SMALL | CODE16)
876 #define BIG16 (BIG | CODE16)
880 #define INLINE __inline__
886 #define ENCODE_RELAX_STATE(type, size) \
887 ((relax_substateT) (((type) << 2) | (size)))
888 #define TYPE_FROM_RELAX_STATE(s) \
890 #define DISP_SIZE_FROM_RELAX_STATE(s) \
891 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
893 /* This table is used by relax_frag to promote short jumps to long
894 ones where necessary. SMALL (short) jumps may be promoted to BIG
895 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
896 don't allow a short jump in a 32 bit code segment to be promoted to
897 a 16 bit offset jump because it's slower (requires data size
898 prefix), and doesn't work, unless the destination is in the bottom
899 64k of the code segment (The top 16 bits of eip are zeroed). */
901 const relax_typeS md_relax_table
[] =
904 1) most positive reach of this state,
905 2) most negative reach of this state,
906 3) how many bytes this mode will have in the variable part of the frag
907 4) which index into the table to try if we can't fit into this one. */
909 /* UNCOND_JUMP states. */
910 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
911 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
912 /* dword jmp adds 4 bytes to frag:
913 0 extra opcode bytes, 4 displacement bytes. */
915 /* word jmp adds 2 bytes to frag:
916 0 extra opcode bytes, 2 displacement bytes. */
919 /* COND_JUMP states. */
920 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
921 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
922 /* dword conditionals adds 5 bytes to frag:
923 1 extra opcode byte, 4 displacement bytes. */
925 /* word conditionals add 3 bytes to frag:
926 1 extra opcode byte, 2 displacement bytes. */
929 /* COND_JUMP86 states. */
930 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
931 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
932 /* dword conditionals adds 5 bytes to frag:
933 1 extra opcode byte, 4 displacement bytes. */
935 /* word conditionals add 4 bytes to frag:
936 1 displacement byte and a 3 byte long branch insn. */
940 static const arch_entry cpu_arch
[] =
942 /* Do not replace the first two entries - i386_target_format()
943 relies on them being there in this order. */
944 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
945 CPU_GENERIC32_FLAGS
, 0 },
946 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
947 CPU_GENERIC64_FLAGS
, 0 },
948 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
950 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
952 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
954 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
956 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
958 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
960 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
962 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
964 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
965 CPU_PENTIUMPRO_FLAGS
, 0 },
966 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
968 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
970 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
972 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
974 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
975 CPU_NOCONA_FLAGS
, 0 },
976 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
978 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
980 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
981 CPU_CORE2_FLAGS
, 1 },
982 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
983 CPU_CORE2_FLAGS
, 0 },
984 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
985 CPU_COREI7_FLAGS
, 0 },
986 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
988 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
990 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
991 CPU_IAMCU_FLAGS
, 0 },
992 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
994 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
996 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
997 CPU_ATHLON_FLAGS
, 0 },
998 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
1000 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
1002 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
1004 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
1005 CPU_AMDFAM10_FLAGS
, 0 },
1006 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
1007 CPU_BDVER1_FLAGS
, 0 },
1008 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
1009 CPU_BDVER2_FLAGS
, 0 },
1010 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1011 CPU_BDVER3_FLAGS
, 0 },
1012 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1013 CPU_BDVER4_FLAGS
, 0 },
1014 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1015 CPU_ZNVER1_FLAGS
, 0 },
1016 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1017 CPU_ZNVER2_FLAGS
, 0 },
1018 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1019 CPU_ZNVER3_FLAGS
, 0 },
1020 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1021 CPU_BTVER1_FLAGS
, 0 },
1022 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1023 CPU_BTVER2_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1025 CPU_8087_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1032 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1033 CPU_CMOV_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1035 CPU_FXSR_FLAGS
, 0 },
1036 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1038 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1040 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1041 CPU_SSE2_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1043 CPU_SSE3_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1045 CPU_SSE4A_FLAGS
, 0 },
1046 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1047 CPU_SSSE3_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1049 CPU_SSE4_1_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1051 CPU_SSE4_2_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1053 CPU_SSE4_2_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1056 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1057 CPU_AVX2_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1059 CPU_AVX512F_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1061 CPU_AVX512CD_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1063 CPU_AVX512ER_FLAGS
, 0 },
1064 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1065 CPU_AVX512PF_FLAGS
, 0 },
1066 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1067 CPU_AVX512DQ_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1069 CPU_AVX512BW_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1071 CPU_AVX512VL_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1074 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1075 CPU_VMFUNC_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1078 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1079 CPU_XSAVE_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1081 CPU_XSAVEOPT_FLAGS
, 0 },
1082 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1083 CPU_XSAVEC_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1085 CPU_XSAVES_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1088 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1089 CPU_PCLMUL_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1091 CPU_PCLMUL_FLAGS
, 1 },
1092 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1093 CPU_FSGSBASE_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1095 CPU_RDRND_FLAGS
, 0 },
1096 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1097 CPU_F16C_FLAGS
, 0 },
1098 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1099 CPU_BMI2_FLAGS
, 0 },
1100 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1102 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1103 CPU_FMA4_FLAGS
, 0 },
1104 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1106 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1108 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1109 CPU_MOVBE_FLAGS
, 0 },
1110 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1111 CPU_CX16_FLAGS
, 0 },
1112 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1114 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1115 CPU_LZCNT_FLAGS
, 0 },
1116 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1117 CPU_POPCNT_FLAGS
, 0 },
1118 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1120 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1122 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1123 CPU_INVPCID_FLAGS
, 0 },
1124 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1125 CPU_CLFLUSH_FLAGS
, 0 },
1126 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1128 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1129 CPU_SYSCALL_FLAGS
, 0 },
1130 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1131 CPU_RDTSCP_FLAGS
, 0 },
1132 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1133 CPU_3DNOW_FLAGS
, 0 },
1134 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1135 CPU_3DNOWA_FLAGS
, 0 },
1136 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1137 CPU_PADLOCK_FLAGS
, 0 },
1138 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1139 CPU_SVME_FLAGS
, 1 },
1140 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1141 CPU_SVME_FLAGS
, 0 },
1142 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1143 CPU_SSE4A_FLAGS
, 0 },
1144 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1146 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1148 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1150 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1152 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1153 CPU_RDSEED_FLAGS
, 0 },
1154 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1155 CPU_PRFCHW_FLAGS
, 0 },
1156 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1157 CPU_SMAP_FLAGS
, 0 },
1158 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1160 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1162 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1163 CPU_CLFLUSHOPT_FLAGS
, 0 },
1164 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1165 CPU_PREFETCHWT1_FLAGS
, 0 },
1166 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1168 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1169 CPU_CLWB_FLAGS
, 0 },
1170 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1171 CPU_AVX512IFMA_FLAGS
, 0 },
1172 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1173 CPU_AVX512VBMI_FLAGS
, 0 },
1174 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1175 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1176 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1177 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1178 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1179 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1180 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1181 CPU_AVX512_VBMI2_FLAGS
, 0 },
1182 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1183 CPU_AVX512_VNNI_FLAGS
, 0 },
1184 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1185 CPU_AVX512_BITALG_FLAGS
, 0 },
1186 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1187 CPU_AVX_VNNI_FLAGS
, 0 },
1188 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1189 CPU_CLZERO_FLAGS
, 0 },
1190 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1191 CPU_MWAITX_FLAGS
, 0 },
1192 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1193 CPU_OSPKE_FLAGS
, 0 },
1194 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1195 CPU_RDPID_FLAGS
, 0 },
1196 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1197 CPU_PTWRITE_FLAGS
, 0 },
1198 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1200 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1201 CPU_SHSTK_FLAGS
, 0 },
1202 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1203 CPU_GFNI_FLAGS
, 0 },
1204 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1205 CPU_VAES_FLAGS
, 0 },
1206 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1207 CPU_VPCLMULQDQ_FLAGS
, 0 },
1208 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1209 CPU_WBNOINVD_FLAGS
, 0 },
1210 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1211 CPU_PCONFIG_FLAGS
, 0 },
1212 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1213 CPU_WAITPKG_FLAGS
, 0 },
1214 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1215 CPU_CLDEMOTE_FLAGS
, 0 },
1216 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1217 CPU_AMX_INT8_FLAGS
, 0 },
1218 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1219 CPU_AMX_BF16_FLAGS
, 0 },
1220 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1221 CPU_AMX_TILE_FLAGS
, 0 },
1222 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1223 CPU_MOVDIRI_FLAGS
, 0 },
1224 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1225 CPU_MOVDIR64B_FLAGS
, 0 },
1226 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1227 CPU_AVX512_BF16_FLAGS
, 0 },
1228 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1229 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1230 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1232 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1233 CPU_ENQCMD_FLAGS
, 0 },
1234 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1235 CPU_SERIALIZE_FLAGS
, 0 },
1236 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1237 CPU_RDPRU_FLAGS
, 0 },
1238 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1239 CPU_MCOMMIT_FLAGS
, 0 },
1240 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1241 CPU_SEV_ES_FLAGS
, 0 },
1242 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1243 CPU_TSXLDTRK_FLAGS
, 0 },
1244 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1246 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1247 CPU_WIDEKL_FLAGS
, 0 },
1248 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1249 CPU_UINTR_FLAGS
, 0 },
1250 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1251 CPU_HRESET_FLAGS
, 0 },
1254 static const noarch_entry cpu_noarch
[] =
1256 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1257 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1258 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1259 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1260 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1261 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1262 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1263 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1264 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1265 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1266 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1267 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1268 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1269 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1270 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1271 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1279 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1280 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1281 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1282 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1283 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1284 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1285 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1286 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1287 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1288 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1289 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1290 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1291 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1292 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1293 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1294 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1295 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1296 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1297 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1298 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1299 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1300 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1301 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1302 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1303 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1304 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1305 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1306 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1310 /* Like s_lcomm_internal in gas/read.c but the alignment string
1311 is allowed to be optional. */
1314 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1321 && *input_line_pointer
== ',')
1323 align
= parse_align (needs_align
- 1);
1325 if (align
== (addressT
) -1)
1340 bss_alloc (symbolP
, size
, align
);
1345 pe_lcomm (int needs_align
)
1347 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1351 const pseudo_typeS md_pseudo_table
[] =
1353 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1354 {"align", s_align_bytes
, 0},
1356 {"align", s_align_ptwo
, 0},
1358 {"arch", set_cpu_arch
, 0},
1362 {"lcomm", pe_lcomm
, 1},
1364 {"ffloat", float_cons
, 'f'},
1365 {"dfloat", float_cons
, 'd'},
1366 {"tfloat", float_cons
, 'x'},
1368 {"slong", signed_cons
, 4},
1369 {"noopt", s_ignore
, 0},
1370 {"optim", s_ignore
, 0},
1371 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1372 {"code16", set_code_flag
, CODE_16BIT
},
1373 {"code32", set_code_flag
, CODE_32BIT
},
1375 {"code64", set_code_flag
, CODE_64BIT
},
1377 {"intel_syntax", set_intel_syntax
, 1},
1378 {"att_syntax", set_intel_syntax
, 0},
1379 {"intel_mnemonic", set_intel_mnemonic
, 1},
1380 {"att_mnemonic", set_intel_mnemonic
, 0},
1381 {"allow_index_reg", set_allow_index_reg
, 1},
1382 {"disallow_index_reg", set_allow_index_reg
, 0},
1383 {"sse_check", set_check
, 0},
1384 {"operand_check", set_check
, 1},
1385 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1386 {"largecomm", handle_large_common
, 0},
1388 {"file", dwarf2_directive_file
, 0},
1389 {"loc", dwarf2_directive_loc
, 0},
1390 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1393 {"secrel32", pe_directive_secrel
, 0},
1398 /* For interface with expression (). */
1399 extern char *input_line_pointer
;
1401 /* Hash table for instruction mnemonic lookup. */
1402 static htab_t op_hash
;
1404 /* Hash table for register lookup. */
1405 static htab_t reg_hash
;
1407 /* Various efficient no-op patterns for aligning code labels.
1408 Note: Don't try to assemble the instructions in the comments.
1409 0L and 0w are not legal. */
1410 static const unsigned char f32_1
[] =
1412 static const unsigned char f32_2
[] =
1413 {0x66,0x90}; /* xchg %ax,%ax */
1414 static const unsigned char f32_3
[] =
1415 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1416 static const unsigned char f32_4
[] =
1417 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1418 static const unsigned char f32_6
[] =
1419 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1420 static const unsigned char f32_7
[] =
1421 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1422 static const unsigned char f16_3
[] =
1423 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1424 static const unsigned char f16_4
[] =
1425 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1426 static const unsigned char jump_disp8
[] =
1427 {0xeb}; /* jmp disp8 */
1428 static const unsigned char jump32_disp32
[] =
1429 {0xe9}; /* jmp disp32 */
1430 static const unsigned char jump16_disp32
[] =
1431 {0x66,0xe9}; /* jmp disp32 */
1432 /* 32-bit NOPs patterns. */
1433 static const unsigned char *const f32_patt
[] = {
1434 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1436 /* 16-bit NOPs patterns. */
1437 static const unsigned char *const f16_patt
[] = {
1438 f32_1
, f32_2
, f16_3
, f16_4
1440 /* nopl (%[re]ax) */
1441 static const unsigned char alt_3
[] =
1443 /* nopl 0(%[re]ax) */
1444 static const unsigned char alt_4
[] =
1445 {0x0f,0x1f,0x40,0x00};
1446 /* nopl 0(%[re]ax,%[re]ax,1) */
1447 static const unsigned char alt_5
[] =
1448 {0x0f,0x1f,0x44,0x00,0x00};
1449 /* nopw 0(%[re]ax,%[re]ax,1) */
1450 static const unsigned char alt_6
[] =
1451 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1452 /* nopl 0L(%[re]ax) */
1453 static const unsigned char alt_7
[] =
1454 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1455 /* nopl 0L(%[re]ax,%[re]ax,1) */
1456 static const unsigned char alt_8
[] =
1457 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1458 /* nopw 0L(%[re]ax,%[re]ax,1) */
1459 static const unsigned char alt_9
[] =
1460 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1461 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1462 static const unsigned char alt_10
[] =
1463 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1464 /* data16 nopw %cs:0L(%eax,%eax,1) */
1465 static const unsigned char alt_11
[] =
1466 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1467 /* 32-bit and 64-bit NOPs patterns. */
1468 static const unsigned char *const alt_patt
[] = {
1469 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1470 alt_9
, alt_10
, alt_11
1473 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1474 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1477 i386_output_nops (char *where
, const unsigned char *const *patt
,
1478 int count
, int max_single_nop_size
)
1481 /* Place the longer NOP first. */
1484 const unsigned char *nops
;
1486 if (max_single_nop_size
< 1)
1488 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1489 max_single_nop_size
);
1493 nops
= patt
[max_single_nop_size
- 1];
1495 /* Use the smaller one if the requested one isn't available. */
1498 max_single_nop_size
--;
1499 nops
= patt
[max_single_nop_size
- 1];
1502 last
= count
% max_single_nop_size
;
1505 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1506 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1510 nops
= patt
[last
- 1];
1513 /* Use the smaller one plus one-byte NOP if the needed one
1516 nops
= patt
[last
- 1];
1517 memcpy (where
+ offset
, nops
, last
);
1518 where
[offset
+ last
] = *patt
[0];
1521 memcpy (where
+ offset
, nops
, last
);
1526 fits_in_imm7 (offsetT num
)
1528 return (num
& 0x7f) == num
;
1532 fits_in_imm31 (offsetT num
)
1534 return (num
& 0x7fffffff) == num
;
1537 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1538 single NOP instruction LIMIT. */
1541 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1543 const unsigned char *const *patt
= NULL
;
1544 int max_single_nop_size
;
1545 /* Maximum number of NOPs before switching to jump over NOPs. */
1546 int max_number_of_nops
;
1548 switch (fragP
->fr_type
)
1553 case rs_machine_dependent
:
1554 /* Allow NOP padding for jumps and calls. */
1555 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1556 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1563 /* We need to decide which NOP sequence to use for 32bit and
1564 64bit. When -mtune= is used:
1566 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1567 PROCESSOR_GENERIC32, f32_patt will be used.
1568 2. For the rest, alt_patt will be used.
1570 When -mtune= isn't used, alt_patt will be used if
1571 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1574 When -march= or .arch is used, we can't use anything beyond
1575 cpu_arch_isa_flags. */
1577 if (flag_code
== CODE_16BIT
)
1580 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1581 /* Limit number of NOPs to 2 in 16-bit mode. */
1582 max_number_of_nops
= 2;
1586 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1588 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1589 switch (cpu_arch_tune
)
1591 case PROCESSOR_UNKNOWN
:
1592 /* We use cpu_arch_isa_flags to check if we SHOULD
1593 optimize with nops. */
1594 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1599 case PROCESSOR_PENTIUM4
:
1600 case PROCESSOR_NOCONA
:
1601 case PROCESSOR_CORE
:
1602 case PROCESSOR_CORE2
:
1603 case PROCESSOR_COREI7
:
1604 case PROCESSOR_L1OM
:
1605 case PROCESSOR_K1OM
:
1606 case PROCESSOR_GENERIC64
:
1608 case PROCESSOR_ATHLON
:
1610 case PROCESSOR_AMDFAM10
:
1612 case PROCESSOR_ZNVER
:
1616 case PROCESSOR_I386
:
1617 case PROCESSOR_I486
:
1618 case PROCESSOR_PENTIUM
:
1619 case PROCESSOR_PENTIUMPRO
:
1620 case PROCESSOR_IAMCU
:
1621 case PROCESSOR_GENERIC32
:
1628 switch (fragP
->tc_frag_data
.tune
)
1630 case PROCESSOR_UNKNOWN
:
1631 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1632 PROCESSOR_UNKNOWN. */
1636 case PROCESSOR_I386
:
1637 case PROCESSOR_I486
:
1638 case PROCESSOR_PENTIUM
:
1639 case PROCESSOR_IAMCU
:
1641 case PROCESSOR_ATHLON
:
1643 case PROCESSOR_AMDFAM10
:
1645 case PROCESSOR_ZNVER
:
1647 case PROCESSOR_GENERIC32
:
1648 /* We use cpu_arch_isa_flags to check if we CAN optimize
1650 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1655 case PROCESSOR_PENTIUMPRO
:
1656 case PROCESSOR_PENTIUM4
:
1657 case PROCESSOR_NOCONA
:
1658 case PROCESSOR_CORE
:
1659 case PROCESSOR_CORE2
:
1660 case PROCESSOR_COREI7
:
1661 case PROCESSOR_L1OM
:
1662 case PROCESSOR_K1OM
:
1663 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1668 case PROCESSOR_GENERIC64
:
1674 if (patt
== f32_patt
)
1676 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1677 /* Limit number of NOPs to 2 for older processors. */
1678 max_number_of_nops
= 2;
1682 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1683 /* Limit number of NOPs to 7 for newer processors. */
1684 max_number_of_nops
= 7;
1689 limit
= max_single_nop_size
;
1691 if (fragP
->fr_type
== rs_fill_nop
)
1693 /* Output NOPs for .nop directive. */
1694 if (limit
> max_single_nop_size
)
1696 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1697 _("invalid single nop size: %d "
1698 "(expect within [0, %d])"),
1699 limit
, max_single_nop_size
);
1703 else if (fragP
->fr_type
!= rs_machine_dependent
)
1704 fragP
->fr_var
= count
;
1706 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1708 /* Generate jump over NOPs. */
1709 offsetT disp
= count
- 2;
1710 if (fits_in_imm7 (disp
))
1712 /* Use "jmp disp8" if possible. */
1714 where
[0] = jump_disp8
[0];
1720 unsigned int size_of_jump
;
1722 if (flag_code
== CODE_16BIT
)
1724 where
[0] = jump16_disp32
[0];
1725 where
[1] = jump16_disp32
[1];
1730 where
[0] = jump32_disp32
[0];
1734 count
-= size_of_jump
+ 4;
1735 if (!fits_in_imm31 (count
))
1737 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1738 _("jump over nop padding out of range"));
1742 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1743 where
+= size_of_jump
+ 4;
1747 /* Generate multiple NOPs. */
1748 i386_output_nops (where
, patt
, count
, limit
);
1752 operand_type_all_zero (const union i386_operand_type
*x
)
1754 switch (ARRAY_SIZE(x
->array
))
1765 return !x
->array
[0];
1772 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1774 switch (ARRAY_SIZE(x
->array
))
1790 x
->bitfield
.class = ClassNone
;
1791 x
->bitfield
.instance
= InstanceNone
;
1795 operand_type_equal (const union i386_operand_type
*x
,
1796 const union i386_operand_type
*y
)
1798 switch (ARRAY_SIZE(x
->array
))
1801 if (x
->array
[2] != y
->array
[2])
1805 if (x
->array
[1] != y
->array
[1])
1809 return x
->array
[0] == y
->array
[0];
1817 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1819 switch (ARRAY_SIZE(x
->array
))
1834 return !x
->array
[0];
1841 cpu_flags_equal (const union i386_cpu_flags
*x
,
1842 const union i386_cpu_flags
*y
)
1844 switch (ARRAY_SIZE(x
->array
))
1847 if (x
->array
[3] != y
->array
[3])
1851 if (x
->array
[2] != y
->array
[2])
1855 if (x
->array
[1] != y
->array
[1])
1859 return x
->array
[0] == y
->array
[0];
1867 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1869 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1870 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1873 static INLINE i386_cpu_flags
1874 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1876 switch (ARRAY_SIZE (x
.array
))
1879 x
.array
[3] &= y
.array
[3];
1882 x
.array
[2] &= y
.array
[2];
1885 x
.array
[1] &= y
.array
[1];
1888 x
.array
[0] &= y
.array
[0];
1896 static INLINE i386_cpu_flags
1897 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1899 switch (ARRAY_SIZE (x
.array
))
1902 x
.array
[3] |= y
.array
[3];
1905 x
.array
[2] |= y
.array
[2];
1908 x
.array
[1] |= y
.array
[1];
1911 x
.array
[0] |= y
.array
[0];
1919 static INLINE i386_cpu_flags
1920 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1922 switch (ARRAY_SIZE (x
.array
))
1925 x
.array
[3] &= ~y
.array
[3];
1928 x
.array
[2] &= ~y
.array
[2];
1931 x
.array
[1] &= ~y
.array
[1];
1934 x
.array
[0] &= ~y
.array
[0];
1942 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1944 #define CPU_FLAGS_ARCH_MATCH 0x1
1945 #define CPU_FLAGS_64BIT_MATCH 0x2
1947 #define CPU_FLAGS_PERFECT_MATCH \
1948 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1950 /* Return CPU flags match bits. */
1953 cpu_flags_match (const insn_template
*t
)
1955 i386_cpu_flags x
= t
->cpu_flags
;
1956 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1958 x
.bitfield
.cpu64
= 0;
1959 x
.bitfield
.cpuno64
= 0;
1961 if (cpu_flags_all_zero (&x
))
1963 /* This instruction is available on all archs. */
1964 match
|= CPU_FLAGS_ARCH_MATCH
;
1968 /* This instruction is available only on some archs. */
1969 i386_cpu_flags cpu
= cpu_arch_flags
;
1971 /* AVX512VL is no standalone feature - match it and then strip it. */
1972 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1974 x
.bitfield
.cpuavx512vl
= 0;
1976 cpu
= cpu_flags_and (x
, cpu
);
1977 if (!cpu_flags_all_zero (&cpu
))
1979 if (x
.bitfield
.cpuavx
)
1981 /* We need to check a few extra flags with AVX. */
1982 if (cpu
.bitfield
.cpuavx
1983 && (!t
->opcode_modifier
.sse2avx
1984 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1985 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1986 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1987 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1988 match
|= CPU_FLAGS_ARCH_MATCH
;
1990 else if (x
.bitfield
.cpuavx512f
)
1992 /* We need to check a few extra flags with AVX512F. */
1993 if (cpu
.bitfield
.cpuavx512f
1994 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1995 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1996 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1997 match
|= CPU_FLAGS_ARCH_MATCH
;
2000 match
|= CPU_FLAGS_ARCH_MATCH
;
2006 static INLINE i386_operand_type
2007 operand_type_and (i386_operand_type x
, i386_operand_type y
)
2009 if (x
.bitfield
.class != y
.bitfield
.class)
2010 x
.bitfield
.class = ClassNone
;
2011 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2012 x
.bitfield
.instance
= InstanceNone
;
2014 switch (ARRAY_SIZE (x
.array
))
2017 x
.array
[2] &= y
.array
[2];
2020 x
.array
[1] &= y
.array
[1];
2023 x
.array
[0] &= y
.array
[0];
2031 static INLINE i386_operand_type
2032 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2034 gas_assert (y
.bitfield
.class == ClassNone
);
2035 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2037 switch (ARRAY_SIZE (x
.array
))
2040 x
.array
[2] &= ~y
.array
[2];
2043 x
.array
[1] &= ~y
.array
[1];
2046 x
.array
[0] &= ~y
.array
[0];
2054 static INLINE i386_operand_type
2055 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2057 gas_assert (x
.bitfield
.class == ClassNone
||
2058 y
.bitfield
.class == ClassNone
||
2059 x
.bitfield
.class == y
.bitfield
.class);
2060 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2061 y
.bitfield
.instance
== InstanceNone
||
2062 x
.bitfield
.instance
== y
.bitfield
.instance
);
2064 switch (ARRAY_SIZE (x
.array
))
2067 x
.array
[2] |= y
.array
[2];
2070 x
.array
[1] |= y
.array
[1];
2073 x
.array
[0] |= y
.array
[0];
2081 static INLINE i386_operand_type
2082 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2084 gas_assert (y
.bitfield
.class == ClassNone
);
2085 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2087 switch (ARRAY_SIZE (x
.array
))
2090 x
.array
[2] ^= y
.array
[2];
2093 x
.array
[1] ^= y
.array
[1];
2096 x
.array
[0] ^= y
.array
[0];
2104 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2105 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2106 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2107 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2108 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2109 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2110 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2111 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2112 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2113 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2114 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2115 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2116 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2117 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2118 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2119 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2120 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2131 operand_type_check (i386_operand_type t
, enum operand_type c
)
2136 return t
.bitfield
.class == Reg
;
2139 return (t
.bitfield
.imm8
2143 || t
.bitfield
.imm32s
2144 || t
.bitfield
.imm64
);
2147 return (t
.bitfield
.disp8
2148 || t
.bitfield
.disp16
2149 || t
.bitfield
.disp32
2150 || t
.bitfield
.disp32s
2151 || t
.bitfield
.disp64
);
2154 return (t
.bitfield
.disp8
2155 || t
.bitfield
.disp16
2156 || t
.bitfield
.disp32
2157 || t
.bitfield
.disp32s
2158 || t
.bitfield
.disp64
2159 || t
.bitfield
.baseindex
);
2168 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2169 between operand GIVEN and operand WANTED for instruction template T. */
2172 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2175 return !((i
.types
[given
].bitfield
.byte
2176 && !t
->operand_types
[wanted
].bitfield
.byte
)
2177 || (i
.types
[given
].bitfield
.word
2178 && !t
->operand_types
[wanted
].bitfield
.word
)
2179 || (i
.types
[given
].bitfield
.dword
2180 && !t
->operand_types
[wanted
].bitfield
.dword
)
2181 || (i
.types
[given
].bitfield
.qword
2182 && !t
->operand_types
[wanted
].bitfield
.qword
)
2183 || (i
.types
[given
].bitfield
.tbyte
2184 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2187 /* Return 1 if there is no conflict in SIMD register between operand
2188 GIVEN and operand WANTED for instruction template T. */
2191 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2194 return !((i
.types
[given
].bitfield
.xmmword
2195 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2196 || (i
.types
[given
].bitfield
.ymmword
2197 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2198 || (i
.types
[given
].bitfield
.zmmword
2199 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2200 || (i
.types
[given
].bitfield
.tmmword
2201 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2204 /* Return 1 if there is no conflict in any size between operand GIVEN
2205 and operand WANTED for instruction template T. */
2208 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2211 return (match_operand_size (t
, wanted
, given
)
2212 && !((i
.types
[given
].bitfield
.unspecified
2213 && !i
.broadcast
.type
2214 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2215 || (i
.types
[given
].bitfield
.fword
2216 && !t
->operand_types
[wanted
].bitfield
.fword
)
2217 /* For scalar opcode templates to allow register and memory
2218 operands at the same time, some special casing is needed
2219 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2220 down-conversion vpmov*. */
2221 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2222 && t
->operand_types
[wanted
].bitfield
.byte
2223 + t
->operand_types
[wanted
].bitfield
.word
2224 + t
->operand_types
[wanted
].bitfield
.dword
2225 + t
->operand_types
[wanted
].bitfield
.qword
2226 > !!t
->opcode_modifier
.broadcast
)
2227 ? (i
.types
[given
].bitfield
.xmmword
2228 || i
.types
[given
].bitfield
.ymmword
2229 || i
.types
[given
].bitfield
.zmmword
)
2230 : !match_simd_size(t
, wanted
, given
))));
2233 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2234 operands for instruction template T, and it has MATCH_REVERSE set if there
2235 is no size conflict on any operands for the template with operands reversed
2236 (and the template allows for reversing in the first place). */
2238 #define MATCH_STRAIGHT 1
2239 #define MATCH_REVERSE 2
2241 static INLINE
unsigned int
2242 operand_size_match (const insn_template
*t
)
2244 unsigned int j
, match
= MATCH_STRAIGHT
;
2246 /* Don't check non-absolute jump instructions. */
2247 if (t
->opcode_modifier
.jump
2248 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2251 /* Check memory and accumulator operand size. */
2252 for (j
= 0; j
< i
.operands
; j
++)
2254 if (i
.types
[j
].bitfield
.class != Reg
2255 && i
.types
[j
].bitfield
.class != RegSIMD
2256 && t
->opcode_modifier
.anysize
)
2259 if (t
->operand_types
[j
].bitfield
.class == Reg
2260 && !match_operand_size (t
, j
, j
))
2266 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2267 && !match_simd_size (t
, j
, j
))
2273 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2274 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2280 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2287 if (!t
->opcode_modifier
.d
)
2291 i
.error
= operand_size_mismatch
;
2295 /* Check reverse. */
2296 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2298 for (j
= 0; j
< i
.operands
; j
++)
2300 unsigned int given
= i
.operands
- j
- 1;
2302 if (t
->operand_types
[j
].bitfield
.class == Reg
2303 && !match_operand_size (t
, j
, given
))
2306 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2307 && !match_simd_size (t
, j
, given
))
2310 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2311 && (!match_operand_size (t
, j
, given
)
2312 || !match_simd_size (t
, j
, given
)))
2315 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2319 return match
| MATCH_REVERSE
;
2323 operand_type_match (i386_operand_type overlap
,
2324 i386_operand_type given
)
2326 i386_operand_type temp
= overlap
;
2328 temp
.bitfield
.unspecified
= 0;
2329 temp
.bitfield
.byte
= 0;
2330 temp
.bitfield
.word
= 0;
2331 temp
.bitfield
.dword
= 0;
2332 temp
.bitfield
.fword
= 0;
2333 temp
.bitfield
.qword
= 0;
2334 temp
.bitfield
.tbyte
= 0;
2335 temp
.bitfield
.xmmword
= 0;
2336 temp
.bitfield
.ymmword
= 0;
2337 temp
.bitfield
.zmmword
= 0;
2338 temp
.bitfield
.tmmword
= 0;
2339 if (operand_type_all_zero (&temp
))
2342 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2346 i
.error
= operand_type_mismatch
;
2350 /* If given types g0 and g1 are registers they must be of the same type
2351 unless the expected operand type register overlap is null.
2352 Some Intel syntax memory operand size checking also happens here. */
2355 operand_type_register_match (i386_operand_type g0
,
2356 i386_operand_type t0
,
2357 i386_operand_type g1
,
2358 i386_operand_type t1
)
2360 if (g0
.bitfield
.class != Reg
2361 && g0
.bitfield
.class != RegSIMD
2362 && (!operand_type_check (g0
, anymem
)
2363 || g0
.bitfield
.unspecified
2364 || (t0
.bitfield
.class != Reg
2365 && t0
.bitfield
.class != RegSIMD
)))
2368 if (g1
.bitfield
.class != Reg
2369 && g1
.bitfield
.class != RegSIMD
2370 && (!operand_type_check (g1
, anymem
)
2371 || g1
.bitfield
.unspecified
2372 || (t1
.bitfield
.class != Reg
2373 && t1
.bitfield
.class != RegSIMD
)))
2376 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2377 && g0
.bitfield
.word
== g1
.bitfield
.word
2378 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2379 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2380 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2381 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2382 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2385 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2386 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2387 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2388 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2389 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2390 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2391 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2394 i
.error
= register_type_mismatch
;
2399 static INLINE
unsigned int
2400 register_number (const reg_entry
*r
)
2402 unsigned int nr
= r
->reg_num
;
2404 if (r
->reg_flags
& RegRex
)
2407 if (r
->reg_flags
& RegVRex
)
2413 static INLINE
unsigned int
2414 mode_from_disp_size (i386_operand_type t
)
2416 if (t
.bitfield
.disp8
)
2418 else if (t
.bitfield
.disp16
2419 || t
.bitfield
.disp32
2420 || t
.bitfield
.disp32s
)
2427 fits_in_signed_byte (addressT num
)
2429 return num
+ 0x80 <= 0xff;
2433 fits_in_unsigned_byte (addressT num
)
2439 fits_in_unsigned_word (addressT num
)
2441 return num
<= 0xffff;
2445 fits_in_signed_word (addressT num
)
2447 return num
+ 0x8000 <= 0xffff;
2451 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2456 return num
+ 0x80000000 <= 0xffffffff;
2458 } /* fits_in_signed_long() */
2461 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2466 return num
<= 0xffffffff;
2468 } /* fits_in_unsigned_long() */
2471 fits_in_disp8 (offsetT num
)
2473 int shift
= i
.memshift
;
2479 mask
= (1 << shift
) - 1;
2481 /* Return 0 if NUM isn't properly aligned. */
2485 /* Check if NUM will fit in 8bit after shift. */
2486 return fits_in_signed_byte (num
>> shift
);
2490 fits_in_imm4 (offsetT num
)
2492 return (num
& 0xf) == num
;
2495 static i386_operand_type
2496 smallest_imm_type (offsetT num
)
2498 i386_operand_type t
;
2500 operand_type_set (&t
, 0);
2501 t
.bitfield
.imm64
= 1;
2503 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2505 /* This code is disabled on the 486 because all the Imm1 forms
2506 in the opcode table are slower on the i486. They're the
2507 versions with the implicitly specified single-position
2508 displacement, which has another syntax if you really want to
2510 t
.bitfield
.imm1
= 1;
2511 t
.bitfield
.imm8
= 1;
2512 t
.bitfield
.imm8s
= 1;
2513 t
.bitfield
.imm16
= 1;
2514 t
.bitfield
.imm32
= 1;
2515 t
.bitfield
.imm32s
= 1;
2517 else if (fits_in_signed_byte (num
))
2519 t
.bitfield
.imm8
= 1;
2520 t
.bitfield
.imm8s
= 1;
2521 t
.bitfield
.imm16
= 1;
2522 t
.bitfield
.imm32
= 1;
2523 t
.bitfield
.imm32s
= 1;
2525 else if (fits_in_unsigned_byte (num
))
2527 t
.bitfield
.imm8
= 1;
2528 t
.bitfield
.imm16
= 1;
2529 t
.bitfield
.imm32
= 1;
2530 t
.bitfield
.imm32s
= 1;
2532 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2534 t
.bitfield
.imm16
= 1;
2535 t
.bitfield
.imm32
= 1;
2536 t
.bitfield
.imm32s
= 1;
2538 else if (fits_in_signed_long (num
))
2540 t
.bitfield
.imm32
= 1;
2541 t
.bitfield
.imm32s
= 1;
2543 else if (fits_in_unsigned_long (num
))
2544 t
.bitfield
.imm32
= 1;
2550 offset_in_range (offsetT val
, int size
)
2556 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2557 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2558 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2560 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2565 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2567 char buf1
[40], buf2
[40];
2569 sprint_value (buf1
, val
);
2570 sprint_value (buf2
, val
& mask
);
2571 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2586 a. PREFIX_EXIST if attempting to add a prefix where one from the
2587 same class already exists.
2588 b. PREFIX_LOCK if lock prefix is added.
2589 c. PREFIX_REP if rep/repne prefix is added.
2590 d. PREFIX_DS if ds prefix is added.
2591 e. PREFIX_OTHER if other prefix is added.
2594 static enum PREFIX_GROUP
2595 add_prefix (unsigned int prefix
)
2597 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2600 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2601 && flag_code
== CODE_64BIT
)
2603 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2604 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2605 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2606 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2617 case DS_PREFIX_OPCODE
:
2620 case CS_PREFIX_OPCODE
:
2621 case ES_PREFIX_OPCODE
:
2622 case FS_PREFIX_OPCODE
:
2623 case GS_PREFIX_OPCODE
:
2624 case SS_PREFIX_OPCODE
:
2628 case REPNE_PREFIX_OPCODE
:
2629 case REPE_PREFIX_OPCODE
:
2634 case LOCK_PREFIX_OPCODE
:
2643 case ADDR_PREFIX_OPCODE
:
2647 case DATA_PREFIX_OPCODE
:
2651 if (i
.prefix
[q
] != 0)
2659 i
.prefix
[q
] |= prefix
;
2662 as_bad (_("same type of prefix used twice"));
2668 update_code_flag (int value
, int check
)
2670 PRINTF_LIKE ((*as_error
));
2672 flag_code
= (enum flag_code
) value
;
2673 if (flag_code
== CODE_64BIT
)
2675 cpu_arch_flags
.bitfield
.cpu64
= 1;
2676 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2680 cpu_arch_flags
.bitfield
.cpu64
= 0;
2681 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2683 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2686 as_error
= as_fatal
;
2689 (*as_error
) (_("64bit mode not supported on `%s'."),
2690 cpu_arch_name
? cpu_arch_name
: default_arch
);
2692 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2695 as_error
= as_fatal
;
2698 (*as_error
) (_("32bit mode not supported on `%s'."),
2699 cpu_arch_name
? cpu_arch_name
: default_arch
);
2701 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag (directive handler).  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2711 set_16bit_gcc_code_flag (int new_code_flag
)
2713 flag_code
= (enum flag_code
) new_code_flag
;
2714 if (flag_code
!= CODE_16BIT
)
2716 cpu_arch_flags
.bitfield
.cpu64
= 0;
2717 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2718 stackop_size
= LONG_MNEM_SUFFIX
;
2722 set_intel_syntax (int syntax_flag
)
2724 /* Find out if register prefixing is specified. */
2725 int ask_naked_reg
= 0;
2728 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2731 int e
= get_symbol_name (&string
);
2733 if (strcmp (string
, "prefix") == 0)
2735 else if (strcmp (string
, "noprefix") == 0)
2738 as_bad (_("bad argument to syntax directive."));
2739 (void) restore_line_pointer (e
);
2741 demand_empty_rest_of_line ();
2743 intel_syntax
= syntax_flag
;
2745 if (ask_naked_reg
== 0)
2746 allow_naked_reg
= (intel_syntax
2747 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2749 allow_naked_reg
= (ask_naked_reg
< 0);
2751 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2753 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2754 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2755 register_prefix
= allow_naked_reg
? "" : "%";
2759 set_intel_mnemonic (int mnemonic_flag
)
2761 intel_mnemonic
= mnemonic_flag
;
2765 set_allow_index_reg (int flag
)
2767 allow_index_reg
= flag
;
2771 set_check (int what
)
2773 enum check_kind
*kind
;
2778 kind
= &operand_check
;
2789 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2792 int e
= get_symbol_name (&string
);
2794 if (strcmp (string
, "none") == 0)
2796 else if (strcmp (string
, "warning") == 0)
2797 *kind
= check_warning
;
2798 else if (strcmp (string
, "error") == 0)
2799 *kind
= check_error
;
2801 as_bad (_("bad argument to %s_check directive."), str
);
2802 (void) restore_line_pointer (e
);
2805 as_bad (_("missing argument for %s_check directive"), str
);
2807 demand_empty_rest_of_line ();
2811 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2812 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2814 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2815 static const char *arch
;
2817 /* Intel LIOM is only supported on ELF. */
2823 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2824 use default_arch. */
2825 arch
= cpu_arch_name
;
2827 arch
= default_arch
;
2830 /* If we are targeting Intel MCU, we must enable it. */
2831 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2832 || new_flag
.bitfield
.cpuiamcu
)
2835 /* If we are targeting Intel L1OM, we must enable it. */
2836 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2837 || new_flag
.bitfield
.cpul1om
)
2840 /* If we are targeting Intel K1OM, we must enable it. */
2841 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2842 || new_flag
.bitfield
.cpuk1om
)
2845 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2850 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2854 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2857 int e
= get_symbol_name (&string
);
2859 i386_cpu_flags flags
;
2861 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2863 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2865 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2869 cpu_arch_name
= cpu_arch
[j
].name
;
2870 cpu_sub_arch_name
= NULL
;
2871 cpu_arch_flags
= cpu_arch
[j
].flags
;
2872 if (flag_code
== CODE_64BIT
)
2874 cpu_arch_flags
.bitfield
.cpu64
= 1;
2875 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2879 cpu_arch_flags
.bitfield
.cpu64
= 0;
2880 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2882 cpu_arch_isa
= cpu_arch
[j
].type
;
2883 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2884 if (!cpu_arch_tune_set
)
2886 cpu_arch_tune
= cpu_arch_isa
;
2887 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2892 flags
= cpu_flags_or (cpu_arch_flags
,
2895 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2897 if (cpu_sub_arch_name
)
2899 char *name
= cpu_sub_arch_name
;
2900 cpu_sub_arch_name
= concat (name
,
2902 (const char *) NULL
);
2906 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2907 cpu_arch_flags
= flags
;
2908 cpu_arch_isa_flags
= flags
;
2912 = cpu_flags_or (cpu_arch_isa_flags
,
2914 (void) restore_line_pointer (e
);
2915 demand_empty_rest_of_line ();
2920 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2922 /* Disable an ISA extension. */
2923 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2924 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2926 flags
= cpu_flags_and_not (cpu_arch_flags
,
2927 cpu_noarch
[j
].flags
);
2928 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2930 if (cpu_sub_arch_name
)
2932 char *name
= cpu_sub_arch_name
;
2933 cpu_sub_arch_name
= concat (name
, string
,
2934 (const char *) NULL
);
2938 cpu_sub_arch_name
= xstrdup (string
);
2939 cpu_arch_flags
= flags
;
2940 cpu_arch_isa_flags
= flags
;
2942 (void) restore_line_pointer (e
);
2943 demand_empty_rest_of_line ();
2947 j
= ARRAY_SIZE (cpu_arch
);
2950 if (j
>= ARRAY_SIZE (cpu_arch
))
2951 as_bad (_("no such architecture: `%s'"), string
);
2953 *input_line_pointer
= e
;
2956 as_bad (_("missing cpu architecture"));
2958 no_cond_jump_promotion
= 0;
2959 if (*input_line_pointer
== ','
2960 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2965 ++input_line_pointer
;
2966 e
= get_symbol_name (&string
);
2968 if (strcmp (string
, "nojumps") == 0)
2969 no_cond_jump_promotion
= 1;
2970 else if (strcmp (string
, "jumps") == 0)
2973 as_bad (_("no such architecture modifier: `%s'"), string
);
2975 (void) restore_line_pointer (e
);
2978 demand_empty_rest_of_line ();
2981 enum bfd_architecture
2984 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2986 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2987 || flag_code
!= CODE_64BIT
)
2988 as_fatal (_("Intel L1OM is 64bit ELF only"));
2989 return bfd_arch_l1om
;
2991 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2993 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2994 || flag_code
!= CODE_64BIT
)
2995 as_fatal (_("Intel K1OM is 64bit ELF only"));
2996 return bfd_arch_k1om
;
2998 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3000 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3001 || flag_code
== CODE_64BIT
)
3002 as_fatal (_("Intel MCU is 32bit ELF only"));
3003 return bfd_arch_iamcu
;
3006 return bfd_arch_i386
;
3012 if (!strncmp (default_arch
, "x86_64", 6))
3014 if (cpu_arch_isa
== PROCESSOR_L1OM
)
3016 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3017 || default_arch
[6] != '\0')
3018 as_fatal (_("Intel L1OM is 64bit ELF only"));
3019 return bfd_mach_l1om
;
3021 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
3023 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3024 || default_arch
[6] != '\0')
3025 as_fatal (_("Intel K1OM is 64bit ELF only"));
3026 return bfd_mach_k1om
;
3028 else if (default_arch
[6] == '\0')
3029 return bfd_mach_x86_64
;
3031 return bfd_mach_x64_32
;
3033 else if (!strcmp (default_arch
, "i386")
3034 || !strcmp (default_arch
, "iamcu"))
3036 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3038 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3039 as_fatal (_("Intel MCU is 32bit ELF only"));
3040 return bfd_mach_i386_iamcu
;
3043 return bfd_mach_i386_i386
;
3046 as_fatal (_("unknown architecture"));
3052 /* Support pseudo prefixes like {disp32}. */
3053 lex_type
['{'] = LEX_BEGIN_NAME
;
3055 /* Initialize op_hash hash table. */
3056 op_hash
= str_htab_create ();
3059 const insn_template
*optab
;
3060 templates
*core_optab
;
3062 /* Setup for loop. */
3064 core_optab
= XNEW (templates
);
3065 core_optab
->start
= optab
;
3070 if (optab
->name
== NULL
3071 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3073 /* different name --> ship out current template list;
3074 add to hash table; & begin anew. */
3075 core_optab
->end
= optab
;
3076 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3077 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3079 if (optab
->name
== NULL
)
3081 core_optab
= XNEW (templates
);
3082 core_optab
->start
= optab
;
3087 /* Initialize reg_hash hash table. */
3088 reg_hash
= str_htab_create ();
3090 const reg_entry
*regtab
;
3091 unsigned int regtab_size
= i386_regtab_size
;
3093 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3095 switch (regtab
->reg_type
.bitfield
.class)
3098 if (regtab
->reg_type
.bitfield
.dword
)
3100 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3103 else if (regtab
->reg_type
.bitfield
.tbyte
)
3105 /* There's no point inserting st(<N>) in the hash table, as
3106 parentheses aren't included in register_chars[] anyway. */
3107 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3114 switch (regtab
->reg_num
)
3116 case 0: reg_es
= regtab
; break;
3117 case 2: reg_ss
= regtab
; break;
3118 case 3: reg_ds
= regtab
; break;
3123 if (!regtab
->reg_num
)
3128 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3129 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3133 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3138 for (c
= 0; c
< 256; c
++)
3143 mnemonic_chars
[c
] = c
;
3144 register_chars
[c
] = c
;
3145 operand_chars
[c
] = c
;
3147 else if (ISLOWER (c
))
3149 mnemonic_chars
[c
] = c
;
3150 register_chars
[c
] = c
;
3151 operand_chars
[c
] = c
;
3153 else if (ISUPPER (c
))
3155 mnemonic_chars
[c
] = TOLOWER (c
);
3156 register_chars
[c
] = mnemonic_chars
[c
];
3157 operand_chars
[c
] = c
;
3159 else if (c
== '{' || c
== '}')
3161 mnemonic_chars
[c
] = c
;
3162 operand_chars
[c
] = c
;
3164 #ifdef SVR4_COMMENT_CHARS
3165 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3166 operand_chars
[c
] = c
;
3169 if (ISALPHA (c
) || ISDIGIT (c
))
3170 identifier_chars
[c
] = c
;
3173 identifier_chars
[c
] = c
;
3174 operand_chars
[c
] = c
;
3179 identifier_chars
['@'] = '@';
3182 identifier_chars
['?'] = '?';
3183 operand_chars
['?'] = '?';
3185 digit_chars
['-'] = '-';
3186 mnemonic_chars
['_'] = '_';
3187 mnemonic_chars
['-'] = '-';
3188 mnemonic_chars
['.'] = '.';
3189 identifier_chars
['_'] = '_';
3190 identifier_chars
['.'] = '.';
3192 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3193 operand_chars
[(unsigned char) *p
] = *p
;
3196 if (flag_code
== CODE_64BIT
)
3198 #if defined (OBJ_COFF) && defined (TE_PE)
3199 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3202 x86_dwarf2_return_column
= 16;
3204 x86_cie_data_alignment
= -8;
3208 x86_dwarf2_return_column
= 8;
3209 x86_cie_data_alignment
= -4;
3212 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3213 can be turned into BRANCH_PREFIX frag. */
3214 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3219 i386_print_statistics (FILE *file
)
3221 htab_print_statistics (file
, "i386 opcode", op_hash
);
3222 htab_print_statistics (file
, "i386 register", reg_hash
);
3227 /* Debugging routines for md_assemble. */
3228 static void pte (insn_template
*);
3229 static void pt (i386_operand_type
);
3230 static void pe (expressionS
*);
3231 static void ps (symbolS
*);
3234 pi (const char *line
, i386_insn
*x
)
3238 fprintf (stdout
, "%s: template ", line
);
3240 fprintf (stdout
, " address: base %s index %s scale %x\n",
3241 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3242 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3243 x
->log2_scale_factor
);
3244 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3245 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3246 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3247 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3248 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3249 (x
->rex
& REX_W
) != 0,
3250 (x
->rex
& REX_R
) != 0,
3251 (x
->rex
& REX_X
) != 0,
3252 (x
->rex
& REX_B
) != 0);
3253 for (j
= 0; j
< x
->operands
; j
++)
3255 fprintf (stdout
, " #%d: ", j
+ 1);
3257 fprintf (stdout
, "\n");
3258 if (x
->types
[j
].bitfield
.class == Reg
3259 || x
->types
[j
].bitfield
.class == RegMMX
3260 || x
->types
[j
].bitfield
.class == RegSIMD
3261 || x
->types
[j
].bitfield
.class == RegMask
3262 || x
->types
[j
].bitfield
.class == SReg
3263 || x
->types
[j
].bitfield
.class == RegCR
3264 || x
->types
[j
].bitfield
.class == RegDR
3265 || x
->types
[j
].bitfield
.class == RegTR
3266 || x
->types
[j
].bitfield
.class == RegBND
)
3267 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3268 if (operand_type_check (x
->types
[j
], imm
))
3270 if (operand_type_check (x
->types
[j
], disp
))
3271 pe (x
->op
[j
].disps
);
3276 pte (insn_template
*t
)
3278 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3279 static const char *const opc_spc
[] = {
3280 NULL
, "0f", "0f38", "0f3a", NULL
, NULL
, NULL
, NULL
,
3281 "XOP08", "XOP09", "XOP0A",
3285 fprintf (stdout
, " %d operands ", t
->operands
);
3286 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3287 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3288 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3289 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3290 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3291 if (t
->extension_opcode
!= None
)
3292 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3293 if (t
->opcode_modifier
.d
)
3294 fprintf (stdout
, "D");
3295 if (t
->opcode_modifier
.w
)
3296 fprintf (stdout
, "W");
3297 fprintf (stdout
, "\n");
3298 for (j
= 0; j
< t
->operands
; j
++)
3300 fprintf (stdout
, " #%d type ", j
+ 1);
3301 pt (t
->operand_types
[j
]);
3302 fprintf (stdout
, "\n");
3309 fprintf (stdout
, " operation %d\n", e
->X_op
);
3310 fprintf (stdout
, " add_number %ld (%lx)\n",
3311 (long) e
->X_add_number
, (long) e
->X_add_number
);
3312 if (e
->X_add_symbol
)
3314 fprintf (stdout
, " add_symbol ");
3315 ps (e
->X_add_symbol
);
3316 fprintf (stdout
, "\n");
3320 fprintf (stdout
, " op_symbol ");
3321 ps (e
->X_op_symbol
);
3322 fprintf (stdout
, "\n");
3329 fprintf (stdout
, "%s type %s%s",
3331 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3332 segment_name (S_GET_SEGMENT (s
)));
3335 static struct type_name
3337 i386_operand_type mask
;
3340 const type_names
[] =
3342 { OPERAND_TYPE_REG8
, "r8" },
3343 { OPERAND_TYPE_REG16
, "r16" },
3344 { OPERAND_TYPE_REG32
, "r32" },
3345 { OPERAND_TYPE_REG64
, "r64" },
3346 { OPERAND_TYPE_ACC8
, "acc8" },
3347 { OPERAND_TYPE_ACC16
, "acc16" },
3348 { OPERAND_TYPE_ACC32
, "acc32" },
3349 { OPERAND_TYPE_ACC64
, "acc64" },
3350 { OPERAND_TYPE_IMM8
, "i8" },
3351 { OPERAND_TYPE_IMM8
, "i8s" },
3352 { OPERAND_TYPE_IMM16
, "i16" },
3353 { OPERAND_TYPE_IMM32
, "i32" },
3354 { OPERAND_TYPE_IMM32S
, "i32s" },
3355 { OPERAND_TYPE_IMM64
, "i64" },
3356 { OPERAND_TYPE_IMM1
, "i1" },
3357 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3358 { OPERAND_TYPE_DISP8
, "d8" },
3359 { OPERAND_TYPE_DISP16
, "d16" },
3360 { OPERAND_TYPE_DISP32
, "d32" },
3361 { OPERAND_TYPE_DISP32S
, "d32s" },
3362 { OPERAND_TYPE_DISP64
, "d64" },
3363 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3364 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3365 { OPERAND_TYPE_CONTROL
, "control reg" },
3366 { OPERAND_TYPE_TEST
, "test reg" },
3367 { OPERAND_TYPE_DEBUG
, "debug reg" },
3368 { OPERAND_TYPE_FLOATREG
, "FReg" },
3369 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3370 { OPERAND_TYPE_SREG
, "SReg" },
3371 { OPERAND_TYPE_REGMMX
, "rMMX" },
3372 { OPERAND_TYPE_REGXMM
, "rXMM" },
3373 { OPERAND_TYPE_REGYMM
, "rYMM" },
3374 { OPERAND_TYPE_REGZMM
, "rZMM" },
3375 { OPERAND_TYPE_REGTMM
, "rTMM" },
3376 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3380 pt (i386_operand_type t
)
3383 i386_operand_type a
;
3385 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3387 a
= operand_type_and (t
, type_names
[j
].mask
);
3388 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3389 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3394 #endif /* DEBUG386 */
3396 static bfd_reloc_code_real_type
3397 reloc (unsigned int size
,
3400 bfd_reloc_code_real_type other
)
3402 if (other
!= NO_RELOC
)
3404 reloc_howto_type
*rel
;
3409 case BFD_RELOC_X86_64_GOT32
:
3410 return BFD_RELOC_X86_64_GOT64
;
3412 case BFD_RELOC_X86_64_GOTPLT64
:
3413 return BFD_RELOC_X86_64_GOTPLT64
;
3415 case BFD_RELOC_X86_64_PLTOFF64
:
3416 return BFD_RELOC_X86_64_PLTOFF64
;
3418 case BFD_RELOC_X86_64_GOTPC32
:
3419 other
= BFD_RELOC_X86_64_GOTPC64
;
3421 case BFD_RELOC_X86_64_GOTPCREL
:
3422 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3424 case BFD_RELOC_X86_64_TPOFF32
:
3425 other
= BFD_RELOC_X86_64_TPOFF64
;
3427 case BFD_RELOC_X86_64_DTPOFF32
:
3428 other
= BFD_RELOC_X86_64_DTPOFF64
;
3434 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3435 if (other
== BFD_RELOC_SIZE32
)
3438 other
= BFD_RELOC_SIZE64
;
3441 as_bad (_("there are no pc-relative size relocations"));
3447 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3448 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3451 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3453 as_bad (_("unknown relocation (%u)"), other
);
3454 else if (size
!= bfd_get_reloc_size (rel
))
3455 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3456 bfd_get_reloc_size (rel
),
3458 else if (pcrel
&& !rel
->pc_relative
)
3459 as_bad (_("non-pc-relative relocation for pc-relative field"));
3460 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3462 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3464 as_bad (_("relocated field and relocation type differ in signedness"));
3473 as_bad (_("there are no unsigned pc-relative relocations"));
3476 case 1: return BFD_RELOC_8_PCREL
;
3477 case 2: return BFD_RELOC_16_PCREL
;
3478 case 4: return BFD_RELOC_32_PCREL
;
3479 case 8: return BFD_RELOC_64_PCREL
;
3481 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3488 case 4: return BFD_RELOC_X86_64_32S
;
3493 case 1: return BFD_RELOC_8
;
3494 case 2: return BFD_RELOC_16
;
3495 case 4: return BFD_RELOC_32
;
3496 case 8: return BFD_RELOC_64
;
3498 as_bad (_("cannot do %s %u byte relocation"),
3499 sign
> 0 ? "signed" : "unsigned", size
);
3505 /* Here we decide which fixups can be adjusted to make them relative to
3506 the beginning of the section instead of the symbol. Basically we need
3507 to make sure that the dynamic relocations are done correctly, so in
3508 some cases we force the original symbol to be used. */
3511 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3513 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3517 /* Don't adjust pc-relative references to merge sections in 64-bit
3519 if (use_rela_relocations
3520 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3524 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3525 and changed later by validate_fix. */
3526 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3527 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3530 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3531 for size relocations. */
3532 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3533 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3534 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3535 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3536 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3537 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3538 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3539 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3540 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3541 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3542 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3543 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3544 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3545 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3546 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3547 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3548 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3549 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3550 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3551 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3552 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3553 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3554 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3555 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3556 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3557 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3558 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3559 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3560 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3561 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3562 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify an x87 mnemonic for Intel-syntax operand sizing:
   0 = not an FP math op (or fxsave/fxrstor), 1 = ordinary FP op,
   2 = integer op (fi*), 3 = (non-waiting) control op.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;

    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;

    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;

    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;

    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;

    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3622 install_template (const insn_template
*t
)
3628 /* Note that for pseudo prefixes this produces a length of 1. But for them
3629 the length isn't interesting at all. */
3630 for (l
= 1; l
< 4; ++l
)
3631 if (!(t
->base_opcode
>> (8 * l
)))
3634 i
.opcode_length
= l
;
3637 /* Build the VEX prefix. */
3640 build_vex_prefix (const insn_template
*t
)
3642 unsigned int register_specifier
;
3643 unsigned int vector_length
;
3646 /* Check register specifier. */
3647 if (i
.vex
.register_specifier
)
3649 register_specifier
=
3650 ~register_number (i
.vex
.register_specifier
) & 0xf;
3651 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3654 register_specifier
= 0xf;
3656 /* Use 2-byte VEX prefix by swapping destination and source operand
3657 if there are more than 1 register operand. */
3658 if (i
.reg_operands
> 1
3659 && i
.vec_encoding
!= vex_encoding_vex3
3660 && i
.dir_encoding
== dir_encoding_default
3661 && i
.operands
== i
.reg_operands
3662 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3663 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3664 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3667 unsigned int xchg
= i
.operands
- 1;
3668 union i386_op temp_op
;
3669 i386_operand_type temp_type
;
3671 temp_type
= i
.types
[xchg
];
3672 i
.types
[xchg
] = i
.types
[0];
3673 i
.types
[0] = temp_type
;
3674 temp_op
= i
.op
[xchg
];
3675 i
.op
[xchg
] = i
.op
[0];
3678 gas_assert (i
.rm
.mode
== 3);
3682 i
.rm
.regmem
= i
.rm
.reg
;
3685 if (i
.tm
.opcode_modifier
.d
)
3686 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3687 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3688 else /* Use the next insn. */
3689 install_template (&t
[1]);
3692 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3693 are no memory operands and at least 3 register ones. */
3694 if (i
.reg_operands
>= 3
3695 && i
.vec_encoding
!= vex_encoding_vex3
3696 && i
.reg_operands
== i
.operands
- i
.imm_operands
3697 && i
.tm
.opcode_modifier
.vex
3698 && i
.tm
.opcode_modifier
.commutative
3699 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3701 && i
.vex
.register_specifier
3702 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3704 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3705 union i386_op temp_op
;
3706 i386_operand_type temp_type
;
3708 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3709 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3710 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3711 &i
.types
[i
.operands
- 3]));
3712 gas_assert (i
.rm
.mode
== 3);
3714 temp_type
= i
.types
[xchg
];
3715 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3716 i
.types
[xchg
+ 1] = temp_type
;
3717 temp_op
= i
.op
[xchg
];
3718 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3719 i
.op
[xchg
+ 1] = temp_op
;
3722 xchg
= i
.rm
.regmem
| 8;
3723 i
.rm
.regmem
= ~register_specifier
& 0xf;
3724 gas_assert (!(i
.rm
.regmem
& 8));
3725 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3726 register_specifier
= ~xchg
& 0xf;
3729 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3730 vector_length
= avxscalar
;
3731 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3737 /* Determine vector length from the last multi-length vector
3740 for (op
= t
->operands
; op
--;)
3741 if (t
->operand_types
[op
].bitfield
.xmmword
3742 && t
->operand_types
[op
].bitfield
.ymmword
3743 && i
.types
[op
].bitfield
.ymmword
)
3750 /* Check the REX.W bit and VEXW. */
3751 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3752 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3753 else if (i
.tm
.opcode_modifier
.vexw
)
3754 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3756 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3758 /* Use 2-byte VEX prefix if possible. */
3760 && i
.vec_encoding
!= vex_encoding_vex3
3761 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3762 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3764 /* 2-byte VEX prefix. */
3768 i
.vex
.bytes
[0] = 0xc5;
3770 /* Check the REX.R bit. */
3771 r
= (i
.rex
& REX_R
) ? 0 : 1;
3772 i
.vex
.bytes
[1] = (r
<< 7
3773 | register_specifier
<< 3
3774 | vector_length
<< 2
3775 | i
.tm
.opcode_modifier
.opcodeprefix
);
3779 /* 3-byte VEX prefix. */
3782 switch (i
.tm
.opcode_modifier
.opcodespace
)
3787 i
.vex
.bytes
[0] = 0xc4;
3792 i
.vex
.bytes
[0] = 0x8f;
3798 /* The high 3 bits of the second VEX byte are 1's compliment
3799 of RXB bits from REX. */
3800 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3802 i
.vex
.bytes
[2] = (w
<< 7
3803 | register_specifier
<< 3
3804 | vector_length
<< 2
3805 | i
.tm
.opcode_modifier
.opcodeprefix
);
3810 is_evex_encoding (const insn_template
*t
)
3812 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3813 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3814 || t
->opcode_modifier
.sae
;
3818 is_any_vex_encoding (const insn_template
*t
)
3820 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3823 /* Build the EVEX prefix. */
3826 build_evex_prefix (void)
3828 unsigned int register_specifier
, w
;
3829 rex_byte vrex_used
= 0;
3831 /* Check register specifier. */
3832 if (i
.vex
.register_specifier
)
3834 gas_assert ((i
.vrex
& REX_X
) == 0);
3836 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3837 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3838 register_specifier
+= 8;
3839 /* The upper 16 registers are encoded in the fourth byte of the
3841 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3842 i
.vex
.bytes
[3] = 0x8;
3843 register_specifier
= ~register_specifier
& 0xf;
3847 register_specifier
= 0xf;
3849 /* Encode upper 16 vector index register in the fourth byte of
3851 if (!(i
.vrex
& REX_X
))
3852 i
.vex
.bytes
[3] = 0x8;
3857 /* 4 byte EVEX prefix. */
3859 i
.vex
.bytes
[0] = 0x62;
3861 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3863 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3864 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_0F3A
);
3865 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3867 /* The fifth bit of the second EVEX byte is 1's compliment of the
3868 REX_R bit in VREX. */
3869 if (!(i
.vrex
& REX_R
))
3870 i
.vex
.bytes
[1] |= 0x10;
3874 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3876 /* When all operands are registers, the REX_X bit in REX is not
3877 used. We reuse it to encode the upper 16 registers, which is
3878 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3879 as 1's compliment. */
3880 if ((i
.vrex
& REX_B
))
3883 i
.vex
.bytes
[1] &= ~0x40;
3887 /* EVEX instructions shouldn't need the REX prefix. */
3888 i
.vrex
&= ~vrex_used
;
3889 gas_assert (i
.vrex
== 0);
3891 /* Check the REX.W bit and VEXW. */
3892 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3893 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3894 else if (i
.tm
.opcode_modifier
.vexw
)
3895 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3897 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3899 /* The third byte of the EVEX prefix. */
3900 i
.vex
.bytes
[2] = ((w
<< 7)
3901 | (register_specifier
<< 3)
3902 | 4 /* Encode the U bit. */
3903 | i
.tm
.opcode_modifier
.opcodeprefix
);
3905 /* The fourth byte of the EVEX prefix. */
3906 /* The zeroing-masking bit. */
3907 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3908 i
.vex
.bytes
[3] |= 0x80;
3910 /* Don't always set the broadcast bit if there is no RC. */
3911 if (i
.rounding
.type
== rc_none
)
3913 /* Encode the vector length. */
3914 unsigned int vec_length
;
3916 if (!i
.tm
.opcode_modifier
.evex
3917 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3921 /* Determine vector length from the last multi-length vector
3923 for (op
= i
.operands
; op
--;)
3924 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3925 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3926 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3928 if (i
.types
[op
].bitfield
.zmmword
)
3930 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3933 else if (i
.types
[op
].bitfield
.ymmword
)
3935 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3938 else if (i
.types
[op
].bitfield
.xmmword
)
3940 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3943 else if (i
.broadcast
.type
&& op
== i
.broadcast
.operand
)
3945 switch (i
.broadcast
.bytes
)
3948 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3951 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3954 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3963 if (op
>= MAX_OPERANDS
)
3967 switch (i
.tm
.opcode_modifier
.evex
)
3969 case EVEXLIG
: /* LL' is ignored */
3970 vec_length
= evexlig
<< 5;
3973 vec_length
= 0 << 5;
3976 vec_length
= 1 << 5;
3979 vec_length
= 2 << 5;
3985 i
.vex
.bytes
[3] |= vec_length
;
3986 /* Encode the broadcast bit. */
3987 if (i
.broadcast
.type
)
3988 i
.vex
.bytes
[3] |= 0x10;
3990 else if (i
.rounding
.type
!= saeonly
)
3991 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
3993 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3996 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
4000 process_immext (void)
4004 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
4005 which is coded in the same place as an 8-bit immediate field
4006 would be. Here we fake an 8-bit immediate operand from the
4007 opcode suffix stored in tm.extension_opcode.
4009 AVX instructions also use this encoding, for some of
4010 3 argument instructions. */
4012 gas_assert (i
.imm_operands
<= 1
4014 || (is_any_vex_encoding (&i
.tm
)
4015 && i
.operands
<= 4)));
4017 exp
= &im_expressions
[i
.imm_operands
++];
4018 i
.op
[i
.operands
].imms
= exp
;
4019 i
.types
[i
.operands
] = imm8
;
4021 exp
->X_op
= O_constant
;
4022 exp
->X_add_number
= i
.tm
.extension_opcode
;
4023 i
.tm
.extension_opcode
= None
;
4030 switch (i
.tm
.opcode_modifier
.prefixok
)
4038 as_bad (_("invalid instruction `%s' after `%s'"),
4039 i
.tm
.name
, i
.hle_prefix
);
4042 if (i
.prefix
[LOCK_PREFIX
])
4044 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4048 case PrefixHLERelease
:
4049 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4051 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4055 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4057 as_bad (_("memory destination needed for instruction `%s'"
4058 " after `xrelease'"), i
.tm
.name
);
4065 /* Try the shortest encoding by shortening operand size. */
4068 optimize_encoding (void)
4072 if (optimize_for_space
4073 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4074 && i
.reg_operands
== 1
4075 && i
.imm_operands
== 1
4076 && !i
.types
[1].bitfield
.byte
4077 && i
.op
[0].imms
->X_op
== O_constant
4078 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4079 && (i
.tm
.base_opcode
== 0xa8
4080 || (i
.tm
.base_opcode
== 0xf6
4081 && i
.tm
.extension_opcode
== 0x0)))
4084 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4086 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4087 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4089 i
.types
[1].bitfield
.byte
= 1;
4090 /* Ignore the suffix. */
4092 /* Convert to byte registers. */
4093 if (i
.types
[1].bitfield
.word
)
4095 else if (i
.types
[1].bitfield
.dword
)
4099 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4104 else if (flag_code
== CODE_64BIT
4105 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4106 && ((i
.types
[1].bitfield
.qword
4107 && i
.reg_operands
== 1
4108 && i
.imm_operands
== 1
4109 && i
.op
[0].imms
->X_op
== O_constant
4110 && ((i
.tm
.base_opcode
== 0xb8
4111 && i
.tm
.extension_opcode
== None
4112 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4113 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4114 && ((i
.tm
.base_opcode
== 0x24
4115 || i
.tm
.base_opcode
== 0xa8)
4116 || (i
.tm
.base_opcode
== 0x80
4117 && i
.tm
.extension_opcode
== 0x4)
4118 || ((i
.tm
.base_opcode
== 0xf6
4119 || (i
.tm
.base_opcode
| 1) == 0xc7)
4120 && i
.tm
.extension_opcode
== 0x0)))
4121 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4122 && i
.tm
.base_opcode
== 0x83
4123 && i
.tm
.extension_opcode
== 0x4)))
4124 || (i
.types
[0].bitfield
.qword
4125 && ((i
.reg_operands
== 2
4126 && i
.op
[0].regs
== i
.op
[1].regs
4127 && (i
.tm
.base_opcode
== 0x30
4128 || i
.tm
.base_opcode
== 0x28))
4129 || (i
.reg_operands
== 1
4131 && i
.tm
.base_opcode
== 0x30)))))
4134 andq $imm31, %r64 -> andl $imm31, %r32
4135 andq $imm7, %r64 -> andl $imm7, %r32
4136 testq $imm31, %r64 -> testl $imm31, %r32
4137 xorq %r64, %r64 -> xorl %r32, %r32
4138 subq %r64, %r64 -> subl %r32, %r32
4139 movq $imm31, %r64 -> movl $imm31, %r32
4140 movq $imm32, %r64 -> movl $imm32, %r32
4142 i
.tm
.opcode_modifier
.norex64
= 1;
4143 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4146 movq $imm31, %r64 -> movl $imm31, %r32
4147 movq $imm32, %r64 -> movl $imm32, %r32
4149 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4150 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4151 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4152 i
.types
[0].bitfield
.imm32
= 1;
4153 i
.types
[0].bitfield
.imm32s
= 0;
4154 i
.types
[0].bitfield
.imm64
= 0;
4155 i
.types
[1].bitfield
.dword
= 1;
4156 i
.types
[1].bitfield
.qword
= 0;
4157 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4160 movq $imm31, %r64 -> movl $imm31, %r32
4162 i
.tm
.base_opcode
= 0xb8;
4163 i
.tm
.extension_opcode
= None
;
4164 i
.tm
.opcode_modifier
.w
= 0;
4165 i
.tm
.opcode_modifier
.modrm
= 0;
4169 else if (optimize
> 1
4170 && !optimize_for_space
4171 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4172 && i
.reg_operands
== 2
4173 && i
.op
[0].regs
== i
.op
[1].regs
4174 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4175 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4176 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4179 andb %rN, %rN -> testb %rN, %rN
4180 andw %rN, %rN -> testw %rN, %rN
4181 andq %rN, %rN -> testq %rN, %rN
4182 orb %rN, %rN -> testb %rN, %rN
4183 orw %rN, %rN -> testw %rN, %rN
4184 orq %rN, %rN -> testq %rN, %rN
4186 and outside of 64-bit mode
4188 andl %rN, %rN -> testl %rN, %rN
4189 orl %rN, %rN -> testl %rN, %rN
4191 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4193 else if (i
.reg_operands
== 3
4194 && i
.op
[0].regs
== i
.op
[1].regs
4195 && !i
.types
[2].bitfield
.xmmword
4196 && (i
.tm
.opcode_modifier
.vex
4197 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4198 && i
.rounding
.type
== rc_none
4199 && is_evex_encoding (&i
.tm
)
4200 && (i
.vec_encoding
!= vex_encoding_evex
4201 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4202 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4203 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4204 && i
.types
[2].bitfield
.ymmword
))))
4205 && ((i
.tm
.base_opcode
== 0x55
4206 || i
.tm
.base_opcode
== 0x57
4207 || i
.tm
.base_opcode
== 0xdf
4208 || i
.tm
.base_opcode
== 0xef
4209 || i
.tm
.base_opcode
== 0xf8
4210 || i
.tm
.base_opcode
== 0xf9
4211 || i
.tm
.base_opcode
== 0xfa
4212 || i
.tm
.base_opcode
== 0xfb
4213 || i
.tm
.base_opcode
== 0x42
4214 || i
.tm
.base_opcode
== 0x47)
4215 && i
.tm
.extension_opcode
== None
))
4218 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4220 EVEX VOP %zmmM, %zmmM, %zmmN
4221 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4222 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4223 EVEX VOP %ymmM, %ymmM, %ymmN
4224 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4225 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4226 VEX VOP %ymmM, %ymmM, %ymmN
4227 -> VEX VOP %xmmM, %xmmM, %xmmN
4228 VOP, one of vpandn and vpxor:
4229 VEX VOP %ymmM, %ymmM, %ymmN
4230 -> VEX VOP %xmmM, %xmmM, %xmmN
4231 VOP, one of vpandnd and vpandnq:
4232 EVEX VOP %zmmM, %zmmM, %zmmN
4233 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4234 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4235 EVEX VOP %ymmM, %ymmM, %ymmN
4236 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4237 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4238 VOP, one of vpxord and vpxorq:
4239 EVEX VOP %zmmM, %zmmM, %zmmN
4240 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4241 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4242 EVEX VOP %ymmM, %ymmM, %ymmN
4243 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4244 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4245 VOP, one of kxord and kxorq:
4246 VEX VOP %kM, %kM, %kN
4247 -> VEX kxorw %kM, %kM, %kN
4248 VOP, one of kandnd and kandnq:
4249 VEX VOP %kM, %kM, %kN
4250 -> VEX kandnw %kM, %kM, %kN
4252 if (is_evex_encoding (&i
.tm
))
4254 if (i
.vec_encoding
!= vex_encoding_evex
)
4256 i
.tm
.opcode_modifier
.vex
= VEX128
;
4257 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4258 i
.tm
.opcode_modifier
.evex
= 0;
4260 else if (optimize
> 1)
4261 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4265 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4267 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4268 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4271 i
.tm
.opcode_modifier
.vex
= VEX128
;
4273 if (i
.tm
.opcode_modifier
.vex
)
4274 for (j
= 0; j
< 3; j
++)
4276 i
.types
[j
].bitfield
.xmmword
= 1;
4277 i
.types
[j
].bitfield
.ymmword
= 0;
4280 else if (i
.vec_encoding
!= vex_encoding_evex
4281 && !i
.types
[0].bitfield
.zmmword
4282 && !i
.types
[1].bitfield
.zmmword
4284 && !i
.broadcast
.type
4285 && is_evex_encoding (&i
.tm
)
4286 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4287 || (i
.tm
.base_opcode
& ~4) == 0xdb
4288 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4289 && i
.tm
.extension_opcode
== None
)
4292 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4293 vmovdqu32 and vmovdqu64:
4294 EVEX VOP %xmmM, %xmmN
4295 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4296 EVEX VOP %ymmM, %ymmN
4297 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4299 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4301 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4303 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4305 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4306 VOP, one of vpand, vpandn, vpor, vpxor:
4307 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4308 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4309 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4310 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4311 EVEX VOP{d,q} mem, %xmmM, %xmmN
4312 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4313 EVEX VOP{d,q} mem, %ymmM, %ymmN
4314 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4316 for (j
= 0; j
< i
.operands
; j
++)
4317 if (operand_type_check (i
.types
[j
], disp
)
4318 && i
.op
[j
].disps
->X_op
== O_constant
)
4320 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4321 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4322 bytes, we choose EVEX Disp8 over VEX Disp32. */
4323 int evex_disp8
, vex_disp8
;
4324 unsigned int memshift
= i
.memshift
;
4325 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4327 evex_disp8
= fits_in_disp8 (n
);
4329 vex_disp8
= fits_in_disp8 (n
);
4330 if (evex_disp8
!= vex_disp8
)
4332 i
.memshift
= memshift
;
4336 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4339 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4340 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4341 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4342 i
.tm
.opcode_modifier
.vex
4343 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4344 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4345 /* VPAND, VPOR, and VPXOR are commutative. */
4346 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4347 i
.tm
.opcode_modifier
.commutative
= 1;
4348 i
.tm
.opcode_modifier
.evex
= 0;
4349 i
.tm
.opcode_modifier
.masking
= 0;
4350 i
.tm
.opcode_modifier
.broadcast
= 0;
4351 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4354 i
.types
[j
].bitfield
.disp8
4355 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4359 /* Return non-zero for load instruction. */
4365 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4366 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4370 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4371 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4372 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4373 if (i
.tm
.opcode_modifier
.anysize
)
4377 if (strcmp (i
.tm
.name
, "pop") == 0)
4381 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4384 if (i
.tm
.base_opcode
== 0x9d
4385 || i
.tm
.base_opcode
== 0x61)
4388 /* movs, cmps, lods, scas. */
4389 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4393 if (base_opcode
== 0x6f
4394 || i
.tm
.base_opcode
== 0xd7)
4396 /* NB: For AMD-specific insns with implicit memory operands,
4397 they're intentionally not covered. */
4400 /* No memory operand. */
4401 if (!i
.mem_operands
)
4407 if (i
.tm
.base_opcode
== 0xae
4408 && i
.tm
.opcode_modifier
.vex
4409 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4410 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4411 && i
.tm
.extension_opcode
== 2)
4414 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4416 /* test, not, neg, mul, imul, div, idiv. */
4417 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4418 && i
.tm
.extension_opcode
!= 1)
4422 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4425 /* add, or, adc, sbb, and, sub, xor, cmp. */
4426 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4429 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4430 if ((base_opcode
== 0xc1
4431 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4432 && i
.tm
.extension_opcode
!= 6)
4435 /* Check for x87 instructions. */
4436 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4438 /* Skip fst, fstp, fstenv, fstcw. */
4439 if (i
.tm
.base_opcode
== 0xd9
4440 && (i
.tm
.extension_opcode
== 2
4441 || i
.tm
.extension_opcode
== 3
4442 || i
.tm
.extension_opcode
== 6
4443 || i
.tm
.extension_opcode
== 7))
4446 /* Skip fisttp, fist, fistp, fstp. */
4447 if (i
.tm
.base_opcode
== 0xdb
4448 && (i
.tm
.extension_opcode
== 1
4449 || i
.tm
.extension_opcode
== 2
4450 || i
.tm
.extension_opcode
== 3
4451 || i
.tm
.extension_opcode
== 7))
4454 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4455 if (i
.tm
.base_opcode
== 0xdd
4456 && (i
.tm
.extension_opcode
== 1
4457 || i
.tm
.extension_opcode
== 2
4458 || i
.tm
.extension_opcode
== 3
4459 || i
.tm
.extension_opcode
== 6
4460 || i
.tm
.extension_opcode
== 7))
4463 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4464 if (i
.tm
.base_opcode
== 0xdf
4465 && (i
.tm
.extension_opcode
== 1
4466 || i
.tm
.extension_opcode
== 2
4467 || i
.tm
.extension_opcode
== 3
4468 || i
.tm
.extension_opcode
== 6
4469 || i
.tm
.extension_opcode
== 7))
4475 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4477 /* bt, bts, btr, btc. */
4478 if (i
.tm
.base_opcode
== 0xba
4479 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4482 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4483 if (i
.tm
.base_opcode
== 0xc7
4484 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4485 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4486 || i
.tm
.extension_opcode
== 6))
4489 /* fxrstor, ldmxcsr, xrstor. */
4490 if (i
.tm
.base_opcode
== 0xae
4491 && (i
.tm
.extension_opcode
== 1
4492 || i
.tm
.extension_opcode
== 2
4493 || i
.tm
.extension_opcode
== 5))
4496 /* lgdt, lidt, lmsw. */
4497 if (i
.tm
.base_opcode
== 0x01
4498 && (i
.tm
.extension_opcode
== 2
4499 || i
.tm
.extension_opcode
== 3
4500 || i
.tm
.extension_opcode
== 6))
4504 dest
= i
.operands
- 1;
4506 /* Check fake imm8 operand and 3 source operands. */
4507 if ((i
.tm
.opcode_modifier
.immext
4508 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4509 && i
.types
[dest
].bitfield
.imm8
)
4512 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4513 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4514 && (base_opcode
== 0x1
4515 || base_opcode
== 0x9
4516 || base_opcode
== 0x11
4517 || base_opcode
== 0x19
4518 || base_opcode
== 0x21
4519 || base_opcode
== 0x29
4520 || base_opcode
== 0x31
4521 || base_opcode
== 0x39
4522 || (base_opcode
| 2) == 0x87))
4526 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4527 && base_opcode
== 0xc1)
4530 /* Check for load instruction. */
4531 return (i
.types
[dest
].bitfield
.class != ClassNone
4532 || i
.types
[dest
].bitfield
.instance
== Accum
);
4535 /* Output lfence, 0xfaee8, after instruction. */
4538 insert_lfence_after (void)
4540 if (lfence_after_load
&& load_insn_p ())
4542 /* There are also two REP string instructions that require
4543 special treatment. Specifically, the compare string (CMPS)
4544 and scan string (SCAS) instructions set EFLAGS in a manner
4545 that depends on the data being compared/scanned. When used
4546 with a REP prefix, the number of iterations may therefore
4547 vary depending on this data. If the data is a program secret
4548 chosen by the adversary using an LVI method,
4549 then this data-dependent behavior may leak some aspect
4551 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4552 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4553 && i
.prefix
[REP_PREFIX
])
4555 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4558 char *p
= frag_more (3);
4565 /* Output lfence, 0xfaee8, before instruction. */
4568 insert_lfence_before (void)
4572 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4575 if (i
.tm
.base_opcode
== 0xff
4576 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4578 /* Insert lfence before indirect branch if needed. */
4580 if (lfence_before_indirect_branch
== lfence_branch_none
)
4583 if (i
.operands
!= 1)
4586 if (i
.reg_operands
== 1)
4588 /* Indirect branch via register. Don't insert lfence with
4589 -mlfence-after-load=yes. */
4590 if (lfence_after_load
4591 || lfence_before_indirect_branch
== lfence_branch_memory
)
4594 else if (i
.mem_operands
== 1
4595 && lfence_before_indirect_branch
!= lfence_branch_register
)
4597 as_warn (_("indirect `%s` with memory operand should be avoided"),
4604 if (last_insn
.kind
!= last_insn_other
4605 && last_insn
.seg
== now_seg
)
4607 as_warn_where (last_insn
.file
, last_insn
.line
,
4608 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4609 last_insn
.name
, i
.tm
.name
);
4620 /* Output or/not/shl and lfence before near ret. */
4621 if (lfence_before_ret
!= lfence_before_ret_none
4622 && (i
.tm
.base_opcode
== 0xc2
4623 || i
.tm
.base_opcode
== 0xc3))
4625 if (last_insn
.kind
!= last_insn_other
4626 && last_insn
.seg
== now_seg
)
4628 as_warn_where (last_insn
.file
, last_insn
.line
,
4629 _("`%s` skips -mlfence-before-ret on `%s`"),
4630 last_insn
.name
, i
.tm
.name
);
4634 /* Near ret ingore operand size override under CPU64. */
4635 char prefix
= flag_code
== CODE_64BIT
4637 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4639 if (lfence_before_ret
== lfence_before_ret_not
)
4641 /* not: 0xf71424, may add prefix
4642 for operand size override or 64-bit code. */
4643 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4657 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4660 if (lfence_before_ret
== lfence_before_ret_or
)
4662 /* or: 0x830c2400, may add prefix
4663 for operand size override or 64-bit code. */
4669 /* shl: 0xc1242400, may add prefix
4670 for operand size override or 64-bit code. */
4685 /* This is the guts of the machine-dependent assembler. LINE points to a
4686 machine dependent instruction. This function is supposed to emit
4687 the frags/bytes it assembles to. */
4690 md_assemble (char *line
)
4693 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4694 const insn_template
*t
;
4696 /* Initialize globals. */
4697 memset (&i
, '\0', sizeof (i
));
4698 i
.rounding
.type
= rc_none
;
4699 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4700 i
.reloc
[j
] = NO_RELOC
;
4701 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4702 memset (im_expressions
, '\0', sizeof (im_expressions
));
4703 save_stack_p
= save_stack
;
4705 /* First parse an instruction mnemonic & call i386_operand for the operands.
4706 We assume that the scrubber has arranged it so that line[0] is the valid
4707 start of a (possibly prefixed) mnemonic. */
4709 line
= parse_insn (line
, mnemonic
);
4712 mnem_suffix
= i
.suffix
;
4714 line
= parse_operands (line
, mnemonic
);
4716 xfree (i
.memop1_string
);
4717 i
.memop1_string
= NULL
;
4721 /* Now we've parsed the mnemonic into a set of templates, and have the
4722 operands at hand. */
4724 /* All Intel opcodes have reversed operands except for "bound", "enter",
4725 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4726 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4727 and "call" instructions with 2 immediate operands so that the immediate
4728 segment precedes the offset consistently in Intel and AT&T modes. */
4731 && (strcmp (mnemonic
, "bound") != 0)
4732 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4733 && (strncmp (mnemonic
, "monitor", 7) != 0)
4734 && (strncmp (mnemonic
, "mwait", 5) != 0)
4735 && (strcmp (mnemonic
, "pvalidate") != 0)
4736 && (strncmp (mnemonic
, "rmp", 3) != 0)
4737 && (strcmp (mnemonic
, "tpause") != 0)
4738 && (strcmp (mnemonic
, "umwait") != 0)
4739 && !(operand_type_check (i
.types
[0], imm
)
4740 && operand_type_check (i
.types
[1], imm
)))
4743 /* The order of the immediates should be reversed
4744 for 2 immediates extrq and insertq instructions */
4745 if (i
.imm_operands
== 2
4746 && (strcmp (mnemonic
, "extrq") == 0
4747 || strcmp (mnemonic
, "insertq") == 0))
4748 swap_2_operands (0, 1);
4753 /* Don't optimize displacement for movabs since it only takes 64bit
4756 && i
.disp_encoding
!= disp_encoding_32bit
4757 && (flag_code
!= CODE_64BIT
4758 || strcmp (mnemonic
, "movabs") != 0))
4761 /* Next, we find a template that matches the given insn,
4762 making sure the overlap of the given operands types is consistent
4763 with the template operand types. */
4765 if (!(t
= match_template (mnem_suffix
)))
4768 if (sse_check
!= check_none
4769 && !i
.tm
.opcode_modifier
.noavx
4770 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4771 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4772 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4773 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4774 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4775 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4776 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4777 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4778 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4779 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4780 || i
.tm
.cpu_flags
.bitfield
.cpusha
4781 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4783 (sse_check
== check_warning
4785 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4788 if (i
.tm
.opcode_modifier
.fwait
)
4789 if (!add_prefix (FWAIT_OPCODE
))
4792 /* Check if REP prefix is OK. */
4793 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4795 as_bad (_("invalid instruction `%s' after `%s'"),
4796 i
.tm
.name
, i
.rep_prefix
);
4800 /* Check for lock without a lockable instruction. Destination operand
4801 must be memory unless it is xchg (0x86). */
4802 if (i
.prefix
[LOCK_PREFIX
]
4803 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
4804 || i
.mem_operands
== 0
4805 || (i
.tm
.base_opcode
!= 0x86
4806 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4808 as_bad (_("expecting lockable instruction after `lock'"));
4812 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4813 if (i
.prefix
[DATA_PREFIX
]
4814 && (is_any_vex_encoding (&i
.tm
)
4815 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
4816 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
4818 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4822 /* Check if HLE prefix is OK. */
4823 if (i
.hle_prefix
&& !check_hle ())
4826 /* Check BND prefix. */
4827 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4828 as_bad (_("expecting valid branch instruction after `bnd'"));
4830 /* Check NOTRACK prefix. */
4831 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
4832 as_bad (_("expecting indirect branch instruction after `notrack'"));
4834 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4836 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4837 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4838 else if (flag_code
!= CODE_16BIT
4839 ? i
.prefix
[ADDR_PREFIX
]
4840 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4841 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4844 /* Insert BND prefix. */
4845 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4847 if (!i
.prefix
[BND_PREFIX
])
4848 add_prefix (BND_PREFIX_OPCODE
);
4849 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4851 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4852 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4856 /* Check string instruction segment overrides. */
4857 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
4859 gas_assert (i
.mem_operands
);
4860 if (!check_string ())
4862 i
.disp_operands
= 0;
4865 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4866 optimize_encoding ();
4868 if (!process_suffix ())
4871 /* Update operand types and check extended states. */
4872 for (j
= 0; j
< i
.operands
; j
++)
4874 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4875 switch (i
.tm
.operand_types
[j
].bitfield
.class)
4880 i
.xstate
|= xstate_mmx
;
4883 i
.xstate
|= xstate_mask
;
4886 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
4887 i
.xstate
|= xstate_tmm
;
4888 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
4889 i
.xstate
|= xstate_zmm
;
4890 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
4891 i
.xstate
|= xstate_ymm
;
4892 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
4893 i
.xstate
|= xstate_xmm
;
4898 /* Make still unresolved immediate matches conform to size of immediate
4899 given in i.suffix. */
4900 if (!finalize_imm ())
4903 if (i
.types
[0].bitfield
.imm1
)
4904 i
.imm_operands
= 0; /* kludge for shift insns. */
4906 /* We only need to check those implicit registers for instructions
4907 with 3 operands or less. */
4908 if (i
.operands
<= 3)
4909 for (j
= 0; j
< i
.operands
; j
++)
4910 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4911 && !i
.types
[j
].bitfield
.xmmword
)
4914 /* For insns with operands there are more diddles to do to the opcode. */
4917 if (!process_operands ())
4920 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4922 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4923 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4926 if (is_any_vex_encoding (&i
.tm
))
4928 if (!cpu_arch_flags
.bitfield
.cpui286
)
4930 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4935 /* Check for explicit REX prefix. */
4936 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
4938 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
4942 if (i
.tm
.opcode_modifier
.vex
)
4943 build_vex_prefix (t
);
4945 build_evex_prefix ();
4947 /* The individual REX.RXBW bits got consumed. */
4948 i
.rex
&= REX_OPCODE
;
4951 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4952 instructions may define INT_OPCODE as well, so avoid this corner
4953 case for those instructions that use MODRM. */
4954 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4955 && i
.tm
.base_opcode
== INT_OPCODE
4956 && !i
.tm
.opcode_modifier
.modrm
4957 && i
.op
[0].imms
->X_add_number
== 3)
4959 i
.tm
.base_opcode
= INT3_OPCODE
;
4963 if ((i
.tm
.opcode_modifier
.jump
== JUMP
4964 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
4965 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
4966 && i
.op
[0].disps
->X_op
== O_constant
)
4968 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4969 the absolute address given by the constant. Since ix86 jumps and
4970 calls are pc relative, we need to generate a reloc. */
4971 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4972 i
.op
[0].disps
->X_op
= O_symbol
;
4975 /* For 8 bit registers we need an empty rex prefix. Also if the
4976 instruction already has a prefix, we need to convert old
4977 registers to new ones. */
4979 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4980 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4981 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4982 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4983 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4984 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4989 i
.rex
|= REX_OPCODE
;
4990 for (x
= 0; x
< 2; x
++)
4992 /* Look for 8 bit operand that uses old registers. */
4993 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4994 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4996 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4997 /* In case it is "hi" register, give up. */
4998 if (i
.op
[x
].regs
->reg_num
> 3)
4999 as_bad (_("can't encode register '%s%s' in an "
5000 "instruction requiring REX prefix."),
5001 register_prefix
, i
.op
[x
].regs
->reg_name
);
5003 /* Otherwise it is equivalent to the extended register.
5004 Since the encoding doesn't change this is merely
5005 cosmetic cleanup for debug output. */
5007 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5012 if (i
.rex
== 0 && i
.rex_encoding
)
5014 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5015 that uses legacy register. If it is "hi" register, don't add
5016 the REX_OPCODE byte. */
5018 for (x
= 0; x
< 2; x
++)
5019 if (i
.types
[x
].bitfield
.class == Reg
5020 && i
.types
[x
].bitfield
.byte
5021 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5022 && i
.op
[x
].regs
->reg_num
> 3)
5024 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5025 i
.rex_encoding
= false;
5034 add_prefix (REX_OPCODE
| i
.rex
);
5036 insert_lfence_before ();
5038 /* We are ready to output the insn. */
5041 insert_lfence_after ();
5043 last_insn
.seg
= now_seg
;
5045 if (i
.tm
.opcode_modifier
.isprefix
)
5047 last_insn
.kind
= last_insn_prefix
;
5048 last_insn
.name
= i
.tm
.name
;
5049 last_insn
.file
= as_where (&last_insn
.line
);
5052 last_insn
.kind
= last_insn_other
;
5056 parse_insn (char *line
, char *mnemonic
)
5059 char *token_start
= l
;
5062 const insn_template
*t
;
5068 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5073 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5075 as_bad (_("no such instruction: `%s'"), token_start
);
5080 if (!is_space_char (*l
)
5081 && *l
!= END_OF_INSN
5083 || (*l
!= PREFIX_SEPARATOR
5086 as_bad (_("invalid character %s in mnemonic"),
5087 output_invalid (*l
));
5090 if (token_start
== l
)
5092 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5093 as_bad (_("expecting prefix; got nothing"));
5095 as_bad (_("expecting mnemonic; got nothing"));
5099 /* Look up instruction (or prefix) via hash table. */
5100 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5102 if (*l
!= END_OF_INSN
5103 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5104 && current_templates
5105 && current_templates
->start
->opcode_modifier
.isprefix
)
5107 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5109 as_bad ((flag_code
!= CODE_64BIT
5110 ? _("`%s' is only supported in 64-bit mode")
5111 : _("`%s' is not supported in 64-bit mode")),
5112 current_templates
->start
->name
);
5115 /* If we are in 16-bit mode, do not allow addr16 or data16.
5116 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5117 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5118 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5119 && flag_code
!= CODE_64BIT
5120 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5121 ^ (flag_code
== CODE_16BIT
)))
5123 as_bad (_("redundant %s prefix"),
5124 current_templates
->start
->name
);
5128 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5130 /* Handle pseudo prefixes. */
5131 switch (current_templates
->start
->extension_opcode
)
5135 i
.disp_encoding
= disp_encoding_8bit
;
5139 i
.disp_encoding
= disp_encoding_16bit
;
5143 i
.disp_encoding
= disp_encoding_32bit
;
5147 i
.dir_encoding
= dir_encoding_load
;
5151 i
.dir_encoding
= dir_encoding_store
;
5155 i
.vec_encoding
= vex_encoding_vex
;
5159 i
.vec_encoding
= vex_encoding_vex3
;
5163 i
.vec_encoding
= vex_encoding_evex
;
5167 i
.rex_encoding
= true;
5169 case Prefix_NoOptimize
:
5171 i
.no_optimize
= true;
5179 /* Add prefix, checking for repeated prefixes. */
5180 switch (add_prefix (current_templates
->start
->base_opcode
))
5185 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5186 i
.notrack_prefix
= current_templates
->start
->name
;
5189 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5190 i
.hle_prefix
= current_templates
->start
->name
;
5191 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5192 i
.bnd_prefix
= current_templates
->start
->name
;
5194 i
.rep_prefix
= current_templates
->start
->name
;
5200 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5207 if (!current_templates
)
5209 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5210 Check if we should swap operand or force 32bit displacement in
5212 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5213 i
.dir_encoding
= dir_encoding_swap
;
5214 else if (mnem_p
- 3 == dot_p
5217 i
.disp_encoding
= disp_encoding_8bit
;
5218 else if (mnem_p
- 4 == dot_p
5222 i
.disp_encoding
= disp_encoding_32bit
;
5227 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5230 if (!current_templates
)
5233 if (mnem_p
> mnemonic
)
5235 /* See if we can get a match by trimming off a suffix. */
5238 case WORD_MNEM_SUFFIX
:
5239 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5240 i
.suffix
= SHORT_MNEM_SUFFIX
;
5243 case BYTE_MNEM_SUFFIX
:
5244 case QWORD_MNEM_SUFFIX
:
5245 i
.suffix
= mnem_p
[-1];
5248 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5250 case SHORT_MNEM_SUFFIX
:
5251 case LONG_MNEM_SUFFIX
:
5254 i
.suffix
= mnem_p
[-1];
5257 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5265 if (intel_float_operand (mnemonic
) == 1)
5266 i
.suffix
= SHORT_MNEM_SUFFIX
;
5268 i
.suffix
= LONG_MNEM_SUFFIX
;
5271 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5277 if (!current_templates
)
5279 as_bad (_("no such instruction: `%s'"), token_start
);
5284 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5285 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5287 /* Check for a branch hint. We allow ",pt" and ",pn" for
5288 predict taken and predict not taken respectively.
5289 I'm not sure that branch hints actually do anything on loop
5290 and jcxz insns (JumpByte) for current Pentium4 chips. They
5291 may work in the future and it doesn't hurt to accept them
5293 if (l
[0] == ',' && l
[1] == 'p')
5297 if (!add_prefix (DS_PREFIX_OPCODE
))
5301 else if (l
[2] == 'n')
5303 if (!add_prefix (CS_PREFIX_OPCODE
))
5309 /* Any other comma loses. */
5312 as_bad (_("invalid character %s in mnemonic"),
5313 output_invalid (*l
));
5317 /* Check if instruction is supported on specified architecture. */
5319 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5321 supported
|= cpu_flags_match (t
);
5322 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5324 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5325 as_warn (_("use .code16 to ensure correct addressing mode"));
5331 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5332 as_bad (flag_code
== CODE_64BIT
5333 ? _("`%s' is not supported in 64-bit mode")
5334 : _("`%s' is only supported in 64-bit mode"),
5335 current_templates
->start
->name
);
5337 as_bad (_("`%s' is not supported on `%s%s'"),
5338 current_templates
->start
->name
,
5339 cpu_arch_name
? cpu_arch_name
: default_arch
,
5340 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5346 parse_operands (char *l
, const char *mnemonic
)
5350 /* 1 if operand is pending after ','. */
5351 unsigned int expecting_operand
= 0;
5353 /* Non-zero if operand parens not balanced. */
5354 unsigned int paren_not_balanced
;
5356 while (*l
!= END_OF_INSN
)
5358 /* Skip optional white space before operand. */
5359 if (is_space_char (*l
))
5361 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5363 as_bad (_("invalid character %s before operand %d"),
5364 output_invalid (*l
),
5368 token_start
= l
; /* After white space. */
5369 paren_not_balanced
= 0;
5370 while (paren_not_balanced
|| *l
!= ',')
5372 if (*l
== END_OF_INSN
)
5374 if (paren_not_balanced
)
5377 as_bad (_("unbalanced parenthesis in operand %d."),
5380 as_bad (_("unbalanced brackets in operand %d."),
5385 break; /* we are done */
5387 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5389 as_bad (_("invalid character %s in operand %d"),
5390 output_invalid (*l
),
5397 ++paren_not_balanced
;
5399 --paren_not_balanced
;
5404 ++paren_not_balanced
;
5406 --paren_not_balanced
;
5410 if (l
!= token_start
)
5411 { /* Yes, we've read in another operand. */
5412 unsigned int operand_ok
;
5413 this_operand
= i
.operands
++;
5414 if (i
.operands
> MAX_OPERANDS
)
5416 as_bad (_("spurious operands; (%d operands/instruction max)"),
5420 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5421 /* Now parse operand adding info to 'i' as we go along. */
5422 END_STRING_AND_SAVE (l
);
5424 if (i
.mem_operands
> 1)
5426 as_bad (_("too many memory references for `%s'"),
5433 i386_intel_operand (token_start
,
5434 intel_float_operand (mnemonic
));
5436 operand_ok
= i386_att_operand (token_start
);
5438 RESTORE_END_STRING (l
);
5444 if (expecting_operand
)
5446 expecting_operand_after_comma
:
5447 as_bad (_("expecting operand after ','; got nothing"));
5452 as_bad (_("expecting operand before ','; got nothing"));
5457 /* Now *l must be either ',' or END_OF_INSN. */
5460 if (*++l
== END_OF_INSN
)
5462 /* Just skip it, if it's \n complain. */
5463 goto expecting_operand_after_comma
;
5465 expecting_operand
= 1;
5472 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5474 union i386_op temp_op
;
5475 i386_operand_type temp_type
;
5476 unsigned int temp_flags
;
5477 enum bfd_reloc_code_real temp_reloc
;
5479 temp_type
= i
.types
[xchg2
];
5480 i
.types
[xchg2
] = i
.types
[xchg1
];
5481 i
.types
[xchg1
] = temp_type
;
5483 temp_flags
= i
.flags
[xchg2
];
5484 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5485 i
.flags
[xchg1
] = temp_flags
;
5487 temp_op
= i
.op
[xchg2
];
5488 i
.op
[xchg2
] = i
.op
[xchg1
];
5489 i
.op
[xchg1
] = temp_op
;
5491 temp_reloc
= i
.reloc
[xchg2
];
5492 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5493 i
.reloc
[xchg1
] = temp_reloc
;
5497 if (i
.mask
.operand
== xchg1
)
5498 i
.mask
.operand
= xchg2
;
5499 else if (i
.mask
.operand
== xchg2
)
5500 i
.mask
.operand
= xchg1
;
5502 if (i
.broadcast
.type
)
5504 if (i
.broadcast
.operand
== xchg1
)
5505 i
.broadcast
.operand
= xchg2
;
5506 else if (i
.broadcast
.operand
== xchg2
)
5507 i
.broadcast
.operand
= xchg1
;
5509 if (i
.rounding
.type
!= rc_none
)
5511 if (i
.rounding
.operand
== xchg1
)
5512 i
.rounding
.operand
= xchg2
;
5513 else if (i
.rounding
.operand
== xchg2
)
5514 i
.rounding
.operand
= xchg1
;
5519 swap_operands (void)
5525 swap_2_operands (1, i
.operands
- 2);
5529 swap_2_operands (0, i
.operands
- 1);
5535 if (i
.mem_operands
== 2)
5537 const reg_entry
*temp_seg
;
5538 temp_seg
= i
.seg
[0];
5539 i
.seg
[0] = i
.seg
[1];
5540 i
.seg
[1] = temp_seg
;
5544 /* Try to ensure constant immediates are represented in the smallest
5549 char guess_suffix
= 0;
5553 guess_suffix
= i
.suffix
;
5554 else if (i
.reg_operands
)
5556 /* Figure out a suffix from the last register operand specified.
5557 We can't do this properly yet, i.e. excluding special register
5558 instances, but the following works for instructions with
5559 immediates. In any case, we can't set i.suffix yet. */
5560 for (op
= i
.operands
; --op
>= 0;)
5561 if (i
.types
[op
].bitfield
.class != Reg
)
5563 else if (i
.types
[op
].bitfield
.byte
)
5565 guess_suffix
= BYTE_MNEM_SUFFIX
;
5568 else if (i
.types
[op
].bitfield
.word
)
5570 guess_suffix
= WORD_MNEM_SUFFIX
;
5573 else if (i
.types
[op
].bitfield
.dword
)
5575 guess_suffix
= LONG_MNEM_SUFFIX
;
5578 else if (i
.types
[op
].bitfield
.qword
)
5580 guess_suffix
= QWORD_MNEM_SUFFIX
;
5584 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5585 guess_suffix
= WORD_MNEM_SUFFIX
;
5587 for (op
= i
.operands
; --op
>= 0;)
5588 if (operand_type_check (i
.types
[op
], imm
))
5590 switch (i
.op
[op
].imms
->X_op
)
5593 /* If a suffix is given, this operand may be shortened. */
5594 switch (guess_suffix
)
5596 case LONG_MNEM_SUFFIX
:
5597 i
.types
[op
].bitfield
.imm32
= 1;
5598 i
.types
[op
].bitfield
.imm64
= 1;
5600 case WORD_MNEM_SUFFIX
:
5601 i
.types
[op
].bitfield
.imm16
= 1;
5602 i
.types
[op
].bitfield
.imm32
= 1;
5603 i
.types
[op
].bitfield
.imm32s
= 1;
5604 i
.types
[op
].bitfield
.imm64
= 1;
5606 case BYTE_MNEM_SUFFIX
:
5607 i
.types
[op
].bitfield
.imm8
= 1;
5608 i
.types
[op
].bitfield
.imm8s
= 1;
5609 i
.types
[op
].bitfield
.imm16
= 1;
5610 i
.types
[op
].bitfield
.imm32
= 1;
5611 i
.types
[op
].bitfield
.imm32s
= 1;
5612 i
.types
[op
].bitfield
.imm64
= 1;
5616 /* If this operand is at most 16 bits, convert it
5617 to a signed 16 bit number before trying to see
5618 whether it will fit in an even smaller size.
5619 This allows a 16-bit operand such as $0xffe0 to
5620 be recognised as within Imm8S range. */
5621 if ((i
.types
[op
].bitfield
.imm16
)
5622 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5624 i
.op
[op
].imms
->X_add_number
=
5625 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5628 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5629 if ((i
.types
[op
].bitfield
.imm32
)
5630 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5633 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5634 ^ ((offsetT
) 1 << 31))
5635 - ((offsetT
) 1 << 31));
5639 = operand_type_or (i
.types
[op
],
5640 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5642 /* We must avoid matching of Imm32 templates when 64bit
5643 only immediate is available. */
5644 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5645 i
.types
[op
].bitfield
.imm32
= 0;
5652 /* Symbols and expressions. */
5654 /* Convert symbolic operand to proper sizes for matching, but don't
5655 prevent matching a set of insns that only supports sizes other
5656 than those matching the insn suffix. */
5658 i386_operand_type mask
, allowed
;
5659 const insn_template
*t
;
5661 operand_type_set (&mask
, 0);
5662 operand_type_set (&allowed
, 0);
5664 for (t
= current_templates
->start
;
5665 t
< current_templates
->end
;
5668 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5669 allowed
= operand_type_and (allowed
, anyimm
);
5671 switch (guess_suffix
)
5673 case QWORD_MNEM_SUFFIX
:
5674 mask
.bitfield
.imm64
= 1;
5675 mask
.bitfield
.imm32s
= 1;
5677 case LONG_MNEM_SUFFIX
:
5678 mask
.bitfield
.imm32
= 1;
5680 case WORD_MNEM_SUFFIX
:
5681 mask
.bitfield
.imm16
= 1;
5683 case BYTE_MNEM_SUFFIX
:
5684 mask
.bitfield
.imm8
= 1;
5689 allowed
= operand_type_and (mask
, allowed
);
5690 if (!operand_type_all_zero (&allowed
))
5691 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5698 /* Try to use the smallest displacement type too. */
5700 optimize_disp (void)
5704 for (op
= i
.operands
; --op
>= 0;)
5705 if (operand_type_check (i
.types
[op
], disp
))
5707 if (i
.op
[op
].disps
->X_op
== O_constant
)
5709 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5711 if (i
.types
[op
].bitfield
.disp16
5712 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5714 /* If this operand is at most 16 bits, convert
5715 to a signed 16 bit number and don't use 64bit
5717 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5718 i
.types
[op
].bitfield
.disp64
= 0;
5721 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5722 if (i
.types
[op
].bitfield
.disp32
5723 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5725 /* If this operand is at most 32 bits, convert
5726 to a signed 32 bit number and don't use 64bit
5728 op_disp
&= (((offsetT
) 2 << 31) - 1);
5729 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5730 i
.types
[op
].bitfield
.disp64
= 0;
5733 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5735 i
.types
[op
].bitfield
.disp8
= 0;
5736 i
.types
[op
].bitfield
.disp16
= 0;
5737 i
.types
[op
].bitfield
.disp32
= 0;
5738 i
.types
[op
].bitfield
.disp32s
= 0;
5739 i
.types
[op
].bitfield
.disp64
= 0;
5743 else if (flag_code
== CODE_64BIT
)
5745 if (fits_in_signed_long (op_disp
))
5747 i
.types
[op
].bitfield
.disp64
= 0;
5748 i
.types
[op
].bitfield
.disp32s
= 1;
5750 if (i
.prefix
[ADDR_PREFIX
]
5751 && fits_in_unsigned_long (op_disp
))
5752 i
.types
[op
].bitfield
.disp32
= 1;
5754 if ((i
.types
[op
].bitfield
.disp32
5755 || i
.types
[op
].bitfield
.disp32s
5756 || i
.types
[op
].bitfield
.disp16
)
5757 && fits_in_disp8 (op_disp
))
5758 i
.types
[op
].bitfield
.disp8
= 1;
5760 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5761 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5763 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5764 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5765 i
.types
[op
].bitfield
.disp8
= 0;
5766 i
.types
[op
].bitfield
.disp16
= 0;
5767 i
.types
[op
].bitfield
.disp32
= 0;
5768 i
.types
[op
].bitfield
.disp32s
= 0;
5769 i
.types
[op
].bitfield
.disp64
= 0;
5772 /* We only support 64bit displacement on constants. */
5773 i
.types
[op
].bitfield
.disp64
= 0;
5777 /* Return 1 if there is a match in broadcast bytes between operand
5778 GIVEN and instruction template T. */
5781 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5783 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5784 && i
.types
[given
].bitfield
.byte
)
5785 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5786 && i
.types
[given
].bitfield
.word
)
5787 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5788 && i
.types
[given
].bitfield
.dword
)
5789 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5790 && i
.types
[given
].bitfield
.qword
));
5793 /* Check if operands are valid for the instruction. */
5796 check_VecOperands (const insn_template
*t
)
5801 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5802 any one operand are implicity requiring AVX512VL support if the actual
5803 operand size is YMMword or XMMword. Since this function runs after
5804 template matching, there's no need to check for YMMword/XMMword in
5806 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5807 if (!cpu_flags_all_zero (&cpu
)
5808 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5809 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5811 for (op
= 0; op
< t
->operands
; ++op
)
5813 if (t
->operand_types
[op
].bitfield
.zmmword
5814 && (i
.types
[op
].bitfield
.ymmword
5815 || i
.types
[op
].bitfield
.xmmword
))
5817 i
.error
= unsupported
;
5823 /* Without VSIB byte, we can't have a vector register for index. */
5824 if (!t
->opcode_modifier
.sib
5826 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5827 || i
.index_reg
->reg_type
.bitfield
.ymmword
5828 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5830 i
.error
= unsupported_vector_index_register
;
5834 /* Check if default mask is allowed. */
5835 if (t
->opcode_modifier
.nodefmask
5836 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
5838 i
.error
= no_default_mask
;
5842 /* For VSIB byte, we need a vector register for index, and all vector
5843 registers must be distinct. */
5844 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
5847 || !((t
->opcode_modifier
.sib
== VECSIB128
5848 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5849 || (t
->opcode_modifier
.sib
== VECSIB256
5850 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5851 || (t
->opcode_modifier
.sib
== VECSIB512
5852 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5854 i
.error
= invalid_vsib_address
;
5858 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
5859 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
5861 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5862 gas_assert (i
.types
[0].bitfield
.xmmword
5863 || i
.types
[0].bitfield
.ymmword
);
5864 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5865 gas_assert (i
.types
[2].bitfield
.xmmword
5866 || i
.types
[2].bitfield
.ymmword
);
5867 if (operand_check
== check_none
)
5869 if (register_number (i
.op
[0].regs
)
5870 != register_number (i
.index_reg
)
5871 && register_number (i
.op
[2].regs
)
5872 != register_number (i
.index_reg
)
5873 && register_number (i
.op
[0].regs
)
5874 != register_number (i
.op
[2].regs
))
5876 if (operand_check
== check_error
)
5878 i
.error
= invalid_vector_register_set
;
5881 as_warn (_("mask, index, and destination registers should be distinct"));
5883 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
5885 if (i
.types
[1].bitfield
.class == RegSIMD
5886 && (i
.types
[1].bitfield
.xmmword
5887 || i
.types
[1].bitfield
.ymmword
5888 || i
.types
[1].bitfield
.zmmword
)
5889 && (register_number (i
.op
[1].regs
)
5890 == register_number (i
.index_reg
)))
5892 if (operand_check
== check_error
)
5894 i
.error
= invalid_vector_register_set
;
5897 if (operand_check
!= check_none
)
5898 as_warn (_("index and destination registers should be distinct"));
5903 /* For AMX instructions with three tmmword operands, all tmmword operand must be
5905 if (t
->operand_types
[0].bitfield
.tmmword
5906 && i
.reg_operands
== 3)
5908 if (register_number (i
.op
[0].regs
)
5909 == register_number (i
.op
[1].regs
)
5910 || register_number (i
.op
[0].regs
)
5911 == register_number (i
.op
[2].regs
)
5912 || register_number (i
.op
[1].regs
)
5913 == register_number (i
.op
[2].regs
))
5915 i
.error
= invalid_tmm_register_set
;
5920 /* Check if broadcast is supported by the instruction and is applied
5921 to the memory operand. */
5922 if (i
.broadcast
.type
)
5924 i386_operand_type type
, overlap
;
5926 /* Check if specified broadcast is supported in this instruction,
5927 and its broadcast bytes match the memory operand. */
5928 op
= i
.broadcast
.operand
;
5929 if (!t
->opcode_modifier
.broadcast
5930 || !(i
.flags
[op
] & Operand_Mem
)
5931 || (!i
.types
[op
].bitfield
.unspecified
5932 && !match_broadcast_size (t
, op
)))
5935 i
.error
= unsupported_broadcast
;
5939 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5940 * i
.broadcast
.type
);
5941 operand_type_set (&type
, 0);
5942 switch (i
.broadcast
.bytes
)
5945 type
.bitfield
.word
= 1;
5948 type
.bitfield
.dword
= 1;
5951 type
.bitfield
.qword
= 1;
5954 type
.bitfield
.xmmword
= 1;
5957 type
.bitfield
.ymmword
= 1;
5960 type
.bitfield
.zmmword
= 1;
5966 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5967 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
5968 && t
->operand_types
[op
].bitfield
.byte
5969 + t
->operand_types
[op
].bitfield
.word
5970 + t
->operand_types
[op
].bitfield
.dword
5971 + t
->operand_types
[op
].bitfield
.qword
> 1)
5973 overlap
.bitfield
.xmmword
= 0;
5974 overlap
.bitfield
.ymmword
= 0;
5975 overlap
.bitfield
.zmmword
= 0;
5977 if (operand_type_all_zero (&overlap
))
5980 if (t
->opcode_modifier
.checkregsize
)
5984 type
.bitfield
.baseindex
= 1;
5985 for (j
= 0; j
< i
.operands
; ++j
)
5988 && !operand_type_register_match(i
.types
[j
],
5989 t
->operand_types
[j
],
5991 t
->operand_types
[op
]))
5996 /* If broadcast is supported in this instruction, we need to check if
5997 operand of one-element size isn't specified without broadcast. */
5998 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6000 /* Find memory operand. */
6001 for (op
= 0; op
< i
.operands
; op
++)
6002 if (i
.flags
[op
] & Operand_Mem
)
6004 gas_assert (op
< i
.operands
);
6005 /* Check size of the memory operand. */
6006 if (match_broadcast_size (t
, op
))
6008 i
.error
= broadcast_needed
;
6013 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6015 /* Check if requested masking is supported. */
6018 switch (t
->opcode_modifier
.masking
)
6022 case MERGING_MASKING
:
6026 i
.error
= unsupported_masking
;
6030 case DYNAMIC_MASKING
:
6031 /* Memory destinations allow only merging masking. */
6032 if (i
.mask
.zeroing
&& i
.mem_operands
)
6034 /* Find memory operand. */
6035 for (op
= 0; op
< i
.operands
; op
++)
6036 if (i
.flags
[op
] & Operand_Mem
)
6038 gas_assert (op
< i
.operands
);
6039 if (op
== i
.operands
- 1)
6041 i
.error
= unsupported_masking
;
6051 /* Check if masking is applied to dest operand. */
6052 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6054 i
.error
= mask_not_on_destination
;
6059 if (i
.rounding
.type
!= rc_none
)
6061 if (!t
->opcode_modifier
.sae
6062 || (i
.rounding
.type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
6064 i
.error
= unsupported_rc_sae
;
6067 /* If the instruction has several immediate operands and one of
6068 them is rounding, the rounding operand should be the last
6069 immediate operand. */
6070 if (i
.imm_operands
> 1
6071 && i
.rounding
.operand
!= i
.imm_operands
- 1)
6073 i
.error
= rc_sae_operand_not_last_imm
;
6078 /* Check the special Imm4 cases; must be the first operand. */
6079 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6081 if (i
.op
[0].imms
->X_op
!= O_constant
6082 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6088 /* Turn off Imm<N> so that update_imm won't complain. */
6089 operand_type_set (&i
.types
[0], 0);
6092 /* Check vector Disp8 operand. */
6093 if (t
->opcode_modifier
.disp8memshift
6094 && i
.disp_encoding
!= disp_encoding_32bit
)
6096 if (i
.broadcast
.type
)
6097 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6098 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6099 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6102 const i386_operand_type
*type
= NULL
;
6105 for (op
= 0; op
< i
.operands
; op
++)
6106 if (i
.flags
[op
] & Operand_Mem
)
6108 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6109 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6110 else if (t
->operand_types
[op
].bitfield
.xmmword
6111 + t
->operand_types
[op
].bitfield
.ymmword
6112 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6113 type
= &t
->operand_types
[op
];
6114 else if (!i
.types
[op
].bitfield
.unspecified
)
6115 type
= &i
.types
[op
];
6117 else if (i
.types
[op
].bitfield
.class == RegSIMD
6118 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6120 if (i
.types
[op
].bitfield
.zmmword
)
6122 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6124 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6130 if (type
->bitfield
.zmmword
)
6132 else if (type
->bitfield
.ymmword
)
6134 else if (type
->bitfield
.xmmword
)
6138 /* For the check in fits_in_disp8(). */
6139 if (i
.memshift
== 0)
6143 for (op
= 0; op
< i
.operands
; op
++)
6144 if (operand_type_check (i
.types
[op
], disp
)
6145 && i
.op
[op
].disps
->X_op
== O_constant
)
6147 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6149 i
.types
[op
].bitfield
.disp8
= 1;
6152 i
.types
[op
].bitfield
.disp8
= 0;
6161 /* Check if encoding requirements are met by the instruction. */
6164 VEX_check_encoding (const insn_template
*t
)
6166 if (i
.vec_encoding
== vex_encoding_error
)
6168 i
.error
= unsupported
;
6172 if (i
.vec_encoding
== vex_encoding_evex
)
6174 /* This instruction must be encoded with EVEX prefix. */
6175 if (!is_evex_encoding (t
))
6177 i
.error
= unsupported
;
6183 if (!t
->opcode_modifier
.vex
)
6185 /* This instruction template doesn't have VEX prefix. */
6186 if (i
.vec_encoding
!= vex_encoding_default
)
6188 i
.error
= unsupported
;
6197 static const insn_template
*
6198 match_template (char mnem_suffix
)
6200 /* Points to template once we've found it. */
6201 const insn_template
*t
;
6202 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6203 i386_operand_type overlap4
;
6204 unsigned int found_reverse_match
;
6205 i386_opcode_modifier suffix_check
;
6206 i386_operand_type operand_types
[MAX_OPERANDS
];
6207 int addr_prefix_disp
;
6208 unsigned int j
, size_match
, check_register
;
6209 enum i386_error specific_error
= 0;
6211 #if MAX_OPERANDS != 5
6212 # error "MAX_OPERANDS must be 5."
6215 found_reverse_match
= 0;
6216 addr_prefix_disp
= -1;
6218 /* Prepare for mnemonic suffix check. */
6219 memset (&suffix_check
, 0, sizeof (suffix_check
));
6220 switch (mnem_suffix
)
6222 case BYTE_MNEM_SUFFIX
:
6223 suffix_check
.no_bsuf
= 1;
6225 case WORD_MNEM_SUFFIX
:
6226 suffix_check
.no_wsuf
= 1;
6228 case SHORT_MNEM_SUFFIX
:
6229 suffix_check
.no_ssuf
= 1;
6231 case LONG_MNEM_SUFFIX
:
6232 suffix_check
.no_lsuf
= 1;
6234 case QWORD_MNEM_SUFFIX
:
6235 suffix_check
.no_qsuf
= 1;
6238 /* NB: In Intel syntax, normally we can check for memory operand
6239 size when there is no mnemonic suffix. But jmp and call have
6240 2 different encodings with Dword memory operand size, one with
6241 No_ldSuf and the other without. i.suffix is set to
6242 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6243 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6244 suffix_check
.no_ldsuf
= 1;
6247 /* Must have right number of operands. */
6248 i
.error
= number_of_operands_mismatch
;
6250 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6252 addr_prefix_disp
= -1;
6253 found_reverse_match
= 0;
6255 if (i
.operands
!= t
->operands
)
6258 /* Check processor support. */
6259 i
.error
= unsupported
;
6260 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6263 /* Check Pseudo Prefix. */
6264 i
.error
= unsupported
;
6265 if (t
->opcode_modifier
.pseudovexprefix
6266 && !(i
.vec_encoding
== vex_encoding_vex
6267 || i
.vec_encoding
== vex_encoding_vex3
))
6270 /* Check AT&T mnemonic. */
6271 i
.error
= unsupported_with_intel_mnemonic
;
6272 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6275 /* Check AT&T/Intel syntax. */
6276 i
.error
= unsupported_syntax
;
6277 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6278 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6281 /* Check Intel64/AMD64 ISA. */
6285 /* Default: Don't accept Intel64. */
6286 if (t
->opcode_modifier
.isa64
== INTEL64
)
6290 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6291 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6295 /* -mintel64: Don't accept AMD64. */
6296 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6301 /* Check the suffix. */
6302 i
.error
= invalid_instruction_suffix
;
6303 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6304 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6305 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6306 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6307 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6308 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6311 size_match
= operand_size_match (t
);
6315 /* This is intentionally not
6317 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6319 as the case of a missing * on the operand is accepted (perhaps with
6320 a warning, issued further down). */
6321 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6323 i
.error
= operand_type_mismatch
;
6327 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6328 operand_types
[j
] = t
->operand_types
[j
];
6330 /* In general, don't allow
6331 - 64-bit operands outside of 64-bit mode,
6332 - 32-bit operands on pre-386. */
6333 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6334 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6335 && flag_code
!= CODE_64BIT
6336 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6337 && t
->base_opcode
== 0xc7
6338 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6339 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6340 || (i
.suffix
== LONG_MNEM_SUFFIX
6341 && !cpu_arch_flags
.bitfield
.cpui386
))
6343 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6344 && !intel_float_operand (t
->name
))
6345 : intel_float_operand (t
->name
) != 2)
6346 && (t
->operands
== i
.imm_operands
6347 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6348 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6349 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6350 || (operand_types
[j
].bitfield
.class != RegMMX
6351 && operand_types
[j
].bitfield
.class != RegSIMD
6352 && operand_types
[j
].bitfield
.class != RegMask
))
6353 && !t
->opcode_modifier
.sib
)
6356 /* Do not verify operands when there are none. */
6359 if (VEX_check_encoding (t
))
6361 specific_error
= i
.error
;
6365 /* We've found a match; break out of loop. */
6369 if (!t
->opcode_modifier
.jump
6370 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6372 /* There should be only one Disp operand. */
6373 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6374 if (operand_type_check (operand_types
[j
], disp
))
6376 if (j
< MAX_OPERANDS
)
6378 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6380 addr_prefix_disp
= j
;
6382 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6383 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6387 override
= !override
;
6390 if (operand_types
[j
].bitfield
.disp32
6391 && operand_types
[j
].bitfield
.disp16
)
6393 operand_types
[j
].bitfield
.disp16
= override
;
6394 operand_types
[j
].bitfield
.disp32
= !override
;
6396 operand_types
[j
].bitfield
.disp32s
= 0;
6397 operand_types
[j
].bitfield
.disp64
= 0;
6401 if (operand_types
[j
].bitfield
.disp32s
6402 || operand_types
[j
].bitfield
.disp64
)
6404 operand_types
[j
].bitfield
.disp64
&= !override
;
6405 operand_types
[j
].bitfield
.disp32s
&= !override
;
6406 operand_types
[j
].bitfield
.disp32
= override
;
6408 operand_types
[j
].bitfield
.disp16
= 0;
6414 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6415 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
6416 && t
->base_opcode
== 0xa0
6417 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6420 /* We check register size if needed. */
6421 if (t
->opcode_modifier
.checkregsize
)
6423 check_register
= (1 << t
->operands
) - 1;
6424 if (i
.broadcast
.type
)
6425 check_register
&= ~(1 << i
.broadcast
.operand
);
6430 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6431 switch (t
->operands
)
6434 if (!operand_type_match (overlap0
, i
.types
[0]))
6438 /* xchg %eax, %eax is a special case. It is an alias for nop
6439 only in 32bit mode and we can use opcode 0x90. In 64bit
6440 mode, we can't use 0x90 for xchg %eax, %eax since it should
6441 zero-extend %eax to %rax. */
6442 if (flag_code
== CODE_64BIT
6443 && t
->base_opcode
== 0x90
6444 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6445 && i
.types
[0].bitfield
.instance
== Accum
6446 && i
.types
[0].bitfield
.dword
6447 && i
.types
[1].bitfield
.instance
== Accum
6448 && i
.types
[1].bitfield
.dword
)
6450 /* xrelease mov %eax, <disp> is another special case. It must not
6451 match the accumulator-only encoding of mov. */
6452 if (flag_code
!= CODE_64BIT
6454 && t
->base_opcode
== 0xa0
6455 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6456 && i
.types
[0].bitfield
.instance
== Accum
6457 && (i
.flags
[1] & Operand_Mem
))
6462 if (!(size_match
& MATCH_STRAIGHT
))
6464 /* Reverse direction of operands if swapping is possible in the first
6465 place (operands need to be symmetric) and
6466 - the load form is requested, and the template is a store form,
6467 - the store form is requested, and the template is a load form,
6468 - the non-default (swapped) form is requested. */
6469 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6470 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6471 && !operand_type_all_zero (&overlap1
))
6472 switch (i
.dir_encoding
)
6474 case dir_encoding_load
:
6475 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6476 || t
->opcode_modifier
.regmem
)
6480 case dir_encoding_store
:
6481 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6482 && !t
->opcode_modifier
.regmem
)
6486 case dir_encoding_swap
:
6489 case dir_encoding_default
:
6492 /* If we want store form, we skip the current load. */
6493 if ((i
.dir_encoding
== dir_encoding_store
6494 || i
.dir_encoding
== dir_encoding_swap
)
6495 && i
.mem_operands
== 0
6496 && t
->opcode_modifier
.load
)
6501 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6502 if (!operand_type_match (overlap0
, i
.types
[0])
6503 || !operand_type_match (overlap1
, i
.types
[1])
6504 || ((check_register
& 3) == 3
6505 && !operand_type_register_match (i
.types
[0],
6510 /* Check if other direction is valid ... */
6511 if (!t
->opcode_modifier
.d
)
6515 if (!(size_match
& MATCH_REVERSE
))
6517 /* Try reversing direction of operands. */
6518 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6519 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6520 if (!operand_type_match (overlap0
, i
.types
[0])
6521 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6523 && !operand_type_register_match (i
.types
[0],
6524 operand_types
[i
.operands
- 1],
6525 i
.types
[i
.operands
- 1],
6528 /* Does not match either direction. */
6531 /* found_reverse_match holds which of D or FloatR
6533 if (!t
->opcode_modifier
.d
)
6534 found_reverse_match
= 0;
6535 else if (operand_types
[0].bitfield
.tbyte
)
6536 found_reverse_match
= Opcode_FloatD
;
6537 else if (operand_types
[0].bitfield
.xmmword
6538 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6539 || operand_types
[0].bitfield
.class == RegMMX
6540 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6541 || is_any_vex_encoding(t
))
6542 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6543 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6545 found_reverse_match
= Opcode_D
;
6546 if (t
->opcode_modifier
.floatr
)
6547 found_reverse_match
|= Opcode_FloatR
;
6551 /* Found a forward 2 operand match here. */
6552 switch (t
->operands
)
6555 overlap4
= operand_type_and (i
.types
[4],
6559 overlap3
= operand_type_and (i
.types
[3],
6563 overlap2
= operand_type_and (i
.types
[2],
6568 switch (t
->operands
)
6571 if (!operand_type_match (overlap4
, i
.types
[4])
6572 || !operand_type_register_match (i
.types
[3],
6579 if (!operand_type_match (overlap3
, i
.types
[3])
6580 || ((check_register
& 0xa) == 0xa
6581 && !operand_type_register_match (i
.types
[1],
6585 || ((check_register
& 0xc) == 0xc
6586 && !operand_type_register_match (i
.types
[2],
6593 /* Here we make use of the fact that there are no
6594 reverse match 3 operand instructions. */
6595 if (!operand_type_match (overlap2
, i
.types
[2])
6596 || ((check_register
& 5) == 5
6597 && !operand_type_register_match (i
.types
[0],
6601 || ((check_register
& 6) == 6
6602 && !operand_type_register_match (i
.types
[1],
6610 /* Found either forward/reverse 2, 3 or 4 operand match here:
6611 slip through to break. */
6614 /* Check if vector operands are valid. */
6615 if (check_VecOperands (t
))
6617 specific_error
= i
.error
;
6621 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6622 if (VEX_check_encoding (t
))
6624 specific_error
= i
.error
;
6628 /* We've found a match; break out of loop. */
6632 if (t
== current_templates
->end
)
6634 /* We found no match. */
6635 const char *err_msg
;
6636 switch (specific_error
? specific_error
: i
.error
)
6640 case operand_size_mismatch
:
6641 err_msg
= _("operand size mismatch");
6643 case operand_type_mismatch
:
6644 err_msg
= _("operand type mismatch");
6646 case register_type_mismatch
:
6647 err_msg
= _("register type mismatch");
6649 case number_of_operands_mismatch
:
6650 err_msg
= _("number of operands mismatch");
6652 case invalid_instruction_suffix
:
6653 err_msg
= _("invalid instruction suffix");
6656 err_msg
= _("constant doesn't fit in 4 bits");
6658 case unsupported_with_intel_mnemonic
:
6659 err_msg
= _("unsupported with Intel mnemonic");
6661 case unsupported_syntax
:
6662 err_msg
= _("unsupported syntax");
6665 as_bad (_("unsupported instruction `%s'"),
6666 current_templates
->start
->name
);
6668 case invalid_sib_address
:
6669 err_msg
= _("invalid SIB address");
6671 case invalid_vsib_address
:
6672 err_msg
= _("invalid VSIB address");
6674 case invalid_vector_register_set
:
6675 err_msg
= _("mask, index, and destination registers must be distinct");
6677 case invalid_tmm_register_set
:
6678 err_msg
= _("all tmm registers must be distinct");
6680 case unsupported_vector_index_register
:
6681 err_msg
= _("unsupported vector index register");
6683 case unsupported_broadcast
:
6684 err_msg
= _("unsupported broadcast");
6686 case broadcast_needed
:
6687 err_msg
= _("broadcast is needed for operand of such type");
6689 case unsupported_masking
:
6690 err_msg
= _("unsupported masking");
6692 case mask_not_on_destination
:
6693 err_msg
= _("mask not on destination operand");
6695 case no_default_mask
:
6696 err_msg
= _("default mask isn't allowed");
6698 case unsupported_rc_sae
:
6699 err_msg
= _("unsupported static rounding/sae");
6701 case rc_sae_operand_not_last_imm
:
6703 err_msg
= _("RC/SAE operand must precede immediate operands");
6705 err_msg
= _("RC/SAE operand must follow immediate operands");
6707 case invalid_register_operand
:
6708 err_msg
= _("invalid register operand");
6711 as_bad (_("%s for `%s'"), err_msg
,
6712 current_templates
->start
->name
);
6716 if (!quiet_warnings
)
6719 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6720 as_warn (_("indirect %s without `*'"), t
->name
);
6722 if (t
->opcode_modifier
.isprefix
6723 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6725 /* Warn them that a data or address size prefix doesn't
6726 affect assembly of the next line of code. */
6727 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6731 /* Copy the template we found. */
6732 install_template (t
);
6734 if (addr_prefix_disp
!= -1)
6735 i
.tm
.operand_types
[addr_prefix_disp
]
6736 = operand_types
[addr_prefix_disp
];
6738 if (found_reverse_match
)
6740 /* If we found a reverse match we must alter the opcode direction
6741 bit and clear/flip the regmem modifier one. found_reverse_match
6742 holds bits to change (different for int & float insns). */
6744 i
.tm
.base_opcode
^= found_reverse_match
;
6746 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6747 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6749 /* Certain SIMD insns have their load forms specified in the opcode
6750 table, and hence we need to _set_ RegMem instead of clearing it.
6751 We need to avoid setting the bit though on insns like KMOVW. */
6752 i
.tm
.opcode_modifier
.regmem
6753 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6754 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6755 && !i
.tm
.opcode_modifier
.regmem
;
6764 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6765 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6767 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
6769 as_bad (_("`%s' operand %u must use `%ses' segment"),
6771 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6776 /* There's only ever one segment override allowed per instruction.
6777 This instruction possibly has a legal segment override on the
6778 second operand, so copy the segment to where non-string
6779 instructions store it, allowing common code. */
6780 i
.seg
[op
] = i
.seg
[1];
6786 process_suffix (void)
6788 bool is_crc32
= false, is_movx
= false;
6790 /* If matched instruction specifies an explicit instruction mnemonic
6792 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6793 i
.suffix
= WORD_MNEM_SUFFIX
;
6794 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6795 i
.suffix
= LONG_MNEM_SUFFIX
;
6796 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6797 i
.suffix
= QWORD_MNEM_SUFFIX
;
6798 else if (i
.reg_operands
6799 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6800 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6802 unsigned int numop
= i
.operands
;
6805 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
6806 && (i
.tm
.base_opcode
| 8) == 0xbe)
6807 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
6808 && i
.tm
.base_opcode
== 0x63
6809 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
6812 is_crc32
= (i
.tm
.base_opcode
== 0xf0
6813 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
6814 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
6816 /* movsx/movzx want only their source operand considered here, for the
6817 ambiguity checking below. The suffix will be replaced afterwards
6818 to represent the destination (register). */
6819 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
6822 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6823 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
6826 /* If there's no instruction mnemonic suffix we try to invent one
6827 based on GPR operands. */
6830 /* We take i.suffix from the last register operand specified,
6831 Destination register type is more significant than source
6832 register type. crc32 in SSE4.2 prefers source register
6834 unsigned int op
= is_crc32
? 1 : i
.operands
;
6837 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6838 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6840 if (i
.types
[op
].bitfield
.class != Reg
)
6842 if (i
.types
[op
].bitfield
.byte
)
6843 i
.suffix
= BYTE_MNEM_SUFFIX
;
6844 else if (i
.types
[op
].bitfield
.word
)
6845 i
.suffix
= WORD_MNEM_SUFFIX
;
6846 else if (i
.types
[op
].bitfield
.dword
)
6847 i
.suffix
= LONG_MNEM_SUFFIX
;
6848 else if (i
.types
[op
].bitfield
.qword
)
6849 i
.suffix
= QWORD_MNEM_SUFFIX
;
6855 /* As an exception, movsx/movzx silently default to a byte source
6857 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
6858 i
.suffix
= BYTE_MNEM_SUFFIX
;
6860 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6863 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6864 && i
.tm
.opcode_modifier
.no_bsuf
)
6866 else if (!check_byte_reg ())
6869 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6872 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6873 && i
.tm
.opcode_modifier
.no_lsuf
6874 && !i
.tm
.opcode_modifier
.todword
6875 && !i
.tm
.opcode_modifier
.toqword
)
6877 else if (!check_long_reg ())
6880 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6883 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6884 && i
.tm
.opcode_modifier
.no_qsuf
6885 && !i
.tm
.opcode_modifier
.todword
6886 && !i
.tm
.opcode_modifier
.toqword
)
6888 else if (!check_qword_reg ())
6891 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6894 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6895 && i
.tm
.opcode_modifier
.no_wsuf
)
6897 else if (!check_word_reg ())
6900 else if (intel_syntax
6901 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6902 /* Do nothing if the instruction is going to ignore the prefix. */
6907 /* Undo the movsx/movzx change done above. */
6910 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
6913 i
.suffix
= stackop_size
;
6914 if (stackop_size
== LONG_MNEM_SUFFIX
)
6916 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6917 .code16gcc directive to support 16-bit mode with
6918 32-bit address. For IRET without a suffix, generate
6919 16-bit IRET (opcode 0xcf) to return from an interrupt
6921 if (i
.tm
.base_opcode
== 0xcf)
6923 i
.suffix
= WORD_MNEM_SUFFIX
;
6924 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6926 /* Warn about changed behavior for segment register push/pop. */
6927 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6928 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6933 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6934 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6935 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6936 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
6937 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
6938 && i
.tm
.extension_opcode
<= 3)))
6943 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6945 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6946 || i
.tm
.opcode_modifier
.no_lsuf
)
6947 i
.suffix
= QWORD_MNEM_SUFFIX
;
6952 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6953 i
.suffix
= LONG_MNEM_SUFFIX
;
6956 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6957 i
.suffix
= WORD_MNEM_SUFFIX
;
6963 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6964 /* Also cover lret/retf/iret in 64-bit mode. */
6965 || (flag_code
== CODE_64BIT
6966 && !i
.tm
.opcode_modifier
.no_lsuf
6967 && !i
.tm
.opcode_modifier
.no_qsuf
))
6968 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6969 /* Explicit sizing prefixes are assumed to disambiguate insns. */
6970 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
6971 /* Accept FLDENV et al without suffix. */
6972 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6974 unsigned int suffixes
, evex
= 0;
6976 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6977 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6979 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6981 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6983 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6985 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6988 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6989 also suitable for AT&T syntax mode, it was requested that this be
6990 restricted to just Intel syntax. */
6991 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
.type
)
6995 for (op
= 0; op
< i
.tm
.operands
; ++op
)
6997 if (is_evex_encoding (&i
.tm
)
6998 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7000 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7001 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7002 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7003 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7004 if (!i
.tm
.opcode_modifier
.evex
7005 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7006 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7009 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7010 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7011 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7014 /* Any properly sized operand disambiguates the insn. */
7015 if (i
.types
[op
].bitfield
.xmmword
7016 || i
.types
[op
].bitfield
.ymmword
7017 || i
.types
[op
].bitfield
.zmmword
)
7019 suffixes
&= ~(7 << 6);
7024 if ((i
.flags
[op
] & Operand_Mem
)
7025 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7027 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7029 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7031 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7033 if (is_evex_encoding (&i
.tm
))
7039 /* Are multiple suffixes / operand sizes allowed? */
7040 if (suffixes
& (suffixes
- 1))
7043 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7044 || operand_check
== check_error
))
7046 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7049 if (operand_check
== check_error
)
7051 as_bad (_("no instruction mnemonic suffix given and "
7052 "no register operands; can't size `%s'"), i
.tm
.name
);
7055 if (operand_check
== check_warning
)
7056 as_warn (_("%s; using default for `%s'"),
7058 ? _("ambiguous operand size")
7059 : _("no instruction mnemonic suffix given and "
7060 "no register operands"),
7063 if (i
.tm
.opcode_modifier
.floatmf
)
7064 i
.suffix
= SHORT_MNEM_SUFFIX
;
7066 /* handled below */;
7068 i
.tm
.opcode_modifier
.evex
= evex
;
7069 else if (flag_code
== CODE_16BIT
)
7070 i
.suffix
= WORD_MNEM_SUFFIX
;
7071 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7072 i
.suffix
= LONG_MNEM_SUFFIX
;
7074 i
.suffix
= QWORD_MNEM_SUFFIX
;
7080 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7081 In AT&T syntax, if there is no suffix (warned about above), the default
7082 will be byte extension. */
7083 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7084 i
.tm
.base_opcode
|= 1;
7086 /* For further processing, the suffix should represent the destination
7087 (register). This is already the case when one was used with
7088 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7089 no suffix to begin with. */
7090 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7092 if (i
.types
[1].bitfield
.word
)
7093 i
.suffix
= WORD_MNEM_SUFFIX
;
7094 else if (i
.types
[1].bitfield
.qword
)
7095 i
.suffix
= QWORD_MNEM_SUFFIX
;
7097 i
.suffix
= LONG_MNEM_SUFFIX
;
7099 i
.tm
.opcode_modifier
.w
= 0;
7103 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7104 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7105 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7107 /* Change the opcode based on the operand size given by i.suffix. */
7110 /* Size floating point instruction. */
7111 case LONG_MNEM_SUFFIX
:
7112 if (i
.tm
.opcode_modifier
.floatmf
)
7114 i
.tm
.base_opcode
^= 4;
7118 case WORD_MNEM_SUFFIX
:
7119 case QWORD_MNEM_SUFFIX
:
7120 /* It's not a byte, select word/dword operation. */
7121 if (i
.tm
.opcode_modifier
.w
)
7124 i
.tm
.base_opcode
|= 8;
7126 i
.tm
.base_opcode
|= 1;
7129 case SHORT_MNEM_SUFFIX
:
7130 /* Now select between word & dword operations via the operand
7131 size prefix, except for instructions that will ignore this
7133 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7134 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7135 && !i
.tm
.opcode_modifier
.floatmf
7136 && !is_any_vex_encoding (&i
.tm
)
7137 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7138 || (flag_code
== CODE_64BIT
7139 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7141 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7143 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7144 prefix
= ADDR_PREFIX_OPCODE
;
7146 if (!add_prefix (prefix
))
7150 /* Set mode64 for an operand. */
7151 if (i
.suffix
== QWORD_MNEM_SUFFIX
7152 && flag_code
== CODE_64BIT
7153 && !i
.tm
.opcode_modifier
.norex64
7154 && !i
.tm
.opcode_modifier
.vexw
7155 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7157 && ! (i
.operands
== 2
7158 && i
.tm
.base_opcode
== 0x90
7159 && i
.tm
.extension_opcode
== None
7160 && i
.types
[0].bitfield
.instance
== Accum
7161 && i
.types
[0].bitfield
.qword
7162 && i
.types
[1].bitfield
.instance
== Accum
7163 && i
.types
[1].bitfield
.qword
))
7169 /* Select word/dword/qword operation with explicit data sizing prefix
7170 when there are no suitable register operands. */
7171 if (i
.tm
.opcode_modifier
.w
7172 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7174 || (i
.reg_operands
== 1
7176 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7178 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7179 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7182 i
.tm
.base_opcode
|= 1;
7186 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7188 gas_assert (!i
.suffix
);
7189 gas_assert (i
.reg_operands
);
7191 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7194 /* The address size override prefix changes the size of the
7196 if (flag_code
== CODE_64BIT
7197 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7199 as_bad (_("16-bit addressing unavailable for `%s'"),
7204 if ((flag_code
== CODE_32BIT
7205 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7206 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7207 && !add_prefix (ADDR_PREFIX_OPCODE
))
7212 /* Check invalid register operand when the address size override
7213 prefix changes the size of register operands. */
7215 enum { need_word
, need_dword
, need_qword
} need
;
7217 /* Check the register operand for the address size prefix if
7218 the memory operand has no real registers, like symbol, DISP
7219 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7220 if (i
.mem_operands
== 1
7221 && i
.reg_operands
== 1
7223 && i
.types
[1].bitfield
.class == Reg
7224 && (flag_code
== CODE_32BIT
7225 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7226 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7227 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7228 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7229 || (x86_elf_abi
== X86_64_X32_ABI
7231 && i
.base_reg
->reg_num
== RegIP
7232 && i
.base_reg
->reg_type
.bitfield
.qword
))
7236 && !add_prefix (ADDR_PREFIX_OPCODE
))
7239 if (flag_code
== CODE_32BIT
)
7240 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7241 else if (i
.prefix
[ADDR_PREFIX
])
7244 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7246 for (op
= 0; op
< i
.operands
; op
++)
7248 if (i
.types
[op
].bitfield
.class != Reg
)
7254 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7258 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7262 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7267 as_bad (_("invalid register operand size for `%s'"),
7278 check_byte_reg (void)
7282 for (op
= i
.operands
; --op
>= 0;)
7284 /* Skip non-register operands. */
7285 if (i
.types
[op
].bitfield
.class != Reg
)
7288 /* If this is an eight bit register, it's OK. If it's the 16 or
7289 32 bit version of an eight bit register, we will just use the
7290 low portion, and that's OK too. */
7291 if (i
.types
[op
].bitfield
.byte
)
7294 /* I/O port address operands are OK too. */
7295 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7296 && i
.tm
.operand_types
[op
].bitfield
.word
)
7299 /* crc32 only wants its source operand checked here. */
7300 if (i
.tm
.base_opcode
== 0xf0
7301 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7302 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7306 /* Any other register is bad. */
7307 as_bad (_("`%s%s' not allowed with `%s%c'"),
7308 register_prefix
, i
.op
[op
].regs
->reg_name
,
7309 i
.tm
.name
, i
.suffix
);
7316 check_long_reg (void)
7320 for (op
= i
.operands
; --op
>= 0;)
7321 /* Skip non-register operands. */
7322 if (i
.types
[op
].bitfield
.class != Reg
)
7324 /* Reject eight bit registers, except where the template requires
7325 them. (eg. movzb) */
7326 else if (i
.types
[op
].bitfield
.byte
7327 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7328 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7329 && (i
.tm
.operand_types
[op
].bitfield
.word
7330 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7332 as_bad (_("`%s%s' not allowed with `%s%c'"),
7334 i
.op
[op
].regs
->reg_name
,
7339 /* Error if the e prefix on a general reg is missing. */
7340 else if (i
.types
[op
].bitfield
.word
7341 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7342 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7343 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7345 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7346 register_prefix
, i
.op
[op
].regs
->reg_name
,
7350 /* Warn if the r prefix on a general reg is present. */
7351 else if (i
.types
[op
].bitfield
.qword
7352 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7353 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7354 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7357 && i
.tm
.opcode_modifier
.toqword
7358 && i
.types
[0].bitfield
.class != RegSIMD
)
7360 /* Convert to QWORD. We want REX byte. */
7361 i
.suffix
= QWORD_MNEM_SUFFIX
;
7365 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7366 register_prefix
, i
.op
[op
].regs
->reg_name
,
7375 check_qword_reg (void)
7379 for (op
= i
.operands
; --op
>= 0; )
7380 /* Skip non-register operands. */
7381 if (i
.types
[op
].bitfield
.class != Reg
)
7383 /* Reject eight bit registers, except where the template requires
7384 them. (eg. movzb) */
7385 else if (i
.types
[op
].bitfield
.byte
7386 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7387 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7388 && (i
.tm
.operand_types
[op
].bitfield
.word
7389 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7391 as_bad (_("`%s%s' not allowed with `%s%c'"),
7393 i
.op
[op
].regs
->reg_name
,
7398 /* Warn if the r prefix on a general reg is missing. */
7399 else if ((i
.types
[op
].bitfield
.word
7400 || i
.types
[op
].bitfield
.dword
)
7401 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7402 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7403 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7405 /* Prohibit these changes in the 64bit mode, since the
7406 lowering is more complicated. */
7408 && i
.tm
.opcode_modifier
.todword
7409 && i
.types
[0].bitfield
.class != RegSIMD
)
7411 /* Convert to DWORD. We don't want REX byte. */
7412 i
.suffix
= LONG_MNEM_SUFFIX
;
7416 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7417 register_prefix
, i
.op
[op
].regs
->reg_name
,
7426 check_word_reg (void)
7429 for (op
= i
.operands
; --op
>= 0;)
7430 /* Skip non-register operands. */
7431 if (i
.types
[op
].bitfield
.class != Reg
)
7433 /* Reject eight bit registers, except where the template requires
7434 them. (eg. movzb) */
7435 else if (i
.types
[op
].bitfield
.byte
7436 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7437 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7438 && (i
.tm
.operand_types
[op
].bitfield
.word
7439 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7441 as_bad (_("`%s%s' not allowed with `%s%c'"),
7443 i
.op
[op
].regs
->reg_name
,
7448 /* Error if the e or r prefix on a general reg is present. */
7449 else if ((i
.types
[op
].bitfield
.dword
7450 || i
.types
[op
].bitfield
.qword
)
7451 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7452 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7453 && i
.tm
.operand_types
[op
].bitfield
.word
)
7455 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7456 register_prefix
, i
.op
[op
].regs
->reg_name
,
7464 update_imm (unsigned int j
)
7466 i386_operand_type overlap
= i
.types
[j
];
7467 if ((overlap
.bitfield
.imm8
7468 || overlap
.bitfield
.imm8s
7469 || overlap
.bitfield
.imm16
7470 || overlap
.bitfield
.imm32
7471 || overlap
.bitfield
.imm32s
7472 || overlap
.bitfield
.imm64
)
7473 && !operand_type_equal (&overlap
, &imm8
)
7474 && !operand_type_equal (&overlap
, &imm8s
)
7475 && !operand_type_equal (&overlap
, &imm16
)
7476 && !operand_type_equal (&overlap
, &imm32
)
7477 && !operand_type_equal (&overlap
, &imm32s
)
7478 && !operand_type_equal (&overlap
, &imm64
))
7482 i386_operand_type temp
;
7484 operand_type_set (&temp
, 0);
7485 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7487 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7488 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7490 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7491 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7492 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7494 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7495 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7498 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7501 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7502 || operand_type_equal (&overlap
, &imm16_32
)
7503 || operand_type_equal (&overlap
, &imm16_32s
))
7505 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7510 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7511 overlap
= operand_type_and (overlap
, imm32s
);
7512 else if (i
.prefix
[DATA_PREFIX
])
7513 overlap
= operand_type_and (overlap
,
7514 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7515 if (!operand_type_equal (&overlap
, &imm8
)
7516 && !operand_type_equal (&overlap
, &imm8s
)
7517 && !operand_type_equal (&overlap
, &imm16
)
7518 && !operand_type_equal (&overlap
, &imm32
)
7519 && !operand_type_equal (&overlap
, &imm32s
)
7520 && !operand_type_equal (&overlap
, &imm64
))
7522 as_bad (_("no instruction mnemonic suffix given; "
7523 "can't determine immediate size"));
7527 i
.types
[j
] = overlap
;
7537 /* Update the first 2 immediate operands. */
7538 n
= i
.operands
> 2 ? 2 : i
.operands
;
7541 for (j
= 0; j
< n
; j
++)
7542 if (update_imm (j
) == 0)
7545 /* The 3rd operand can't be immediate operand. */
7546 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7553 process_operands (void)
7555 /* Default segment register this instruction will use for memory
7556 accesses. 0 means unknown. This is only for optimizing out
7557 unnecessary segment overrides. */
7558 const reg_entry
*default_seg
= NULL
;
7560 if (i
.tm
.opcode_modifier
.sse2avx
)
7562 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7564 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7565 i
.prefix
[REX_PREFIX
] = 0;
7568 /* ImmExt should be processed after SSE2AVX. */
7569 else if (i
.tm
.opcode_modifier
.immext
)
7572 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7574 unsigned int dupl
= i
.operands
;
7575 unsigned int dest
= dupl
- 1;
7578 /* The destination must be an xmm register. */
7579 gas_assert (i
.reg_operands
7580 && MAX_OPERANDS
> dupl
7581 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7583 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7584 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7586 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7588 /* Keep xmm0 for instructions with VEX prefix and 3
7590 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7591 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7596 /* We remove the first xmm0 and keep the number of
7597 operands unchanged, which in fact duplicates the
7599 for (j
= 1; j
< i
.operands
; j
++)
7601 i
.op
[j
- 1] = i
.op
[j
];
7602 i
.types
[j
- 1] = i
.types
[j
];
7603 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7604 i
.flags
[j
- 1] = i
.flags
[j
];
7608 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7610 gas_assert ((MAX_OPERANDS
- 1) > dupl
7611 && (i
.tm
.opcode_modifier
.vexsources
7614 /* Add the implicit xmm0 for instructions with VEX prefix
7616 for (j
= i
.operands
; j
> 0; j
--)
7618 i
.op
[j
] = i
.op
[j
- 1];
7619 i
.types
[j
] = i
.types
[j
- 1];
7620 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7621 i
.flags
[j
] = i
.flags
[j
- 1];
7624 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7625 i
.types
[0] = regxmm
;
7626 i
.tm
.operand_types
[0] = regxmm
;
7629 i
.reg_operands
+= 2;
7634 i
.op
[dupl
] = i
.op
[dest
];
7635 i
.types
[dupl
] = i
.types
[dest
];
7636 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7637 i
.flags
[dupl
] = i
.flags
[dest
];
7646 i
.op
[dupl
] = i
.op
[dest
];
7647 i
.types
[dupl
] = i
.types
[dest
];
7648 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7649 i
.flags
[dupl
] = i
.flags
[dest
];
7652 if (i
.tm
.opcode_modifier
.immext
)
7655 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7656 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7660 for (j
= 1; j
< i
.operands
; j
++)
7662 i
.op
[j
- 1] = i
.op
[j
];
7663 i
.types
[j
- 1] = i
.types
[j
];
7665 /* We need to adjust fields in i.tm since they are used by
7666 build_modrm_byte. */
7667 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7669 i
.flags
[j
- 1] = i
.flags
[j
];
7676 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7678 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7680 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7681 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7682 regnum
= register_number (i
.op
[1].regs
);
7683 first_reg_in_group
= regnum
& ~3;
7684 last_reg_in_group
= first_reg_in_group
+ 3;
7685 if (regnum
!= first_reg_in_group
)
7686 as_warn (_("source register `%s%s' implicitly denotes"
7687 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7688 register_prefix
, i
.op
[1].regs
->reg_name
,
7689 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7690 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7693 else if (i
.tm
.opcode_modifier
.regkludge
)
7695 /* The imul $imm, %reg instruction is converted into
7696 imul $imm, %reg, %reg, and the clr %reg instruction
7697 is converted into xor %reg, %reg. */
7699 unsigned int first_reg_op
;
7701 if (operand_type_check (i
.types
[0], reg
))
7705 /* Pretend we saw the extra register operand. */
7706 gas_assert (i
.reg_operands
== 1
7707 && i
.op
[first_reg_op
+ 1].regs
== 0);
7708 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7709 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7714 if (i
.tm
.opcode_modifier
.modrm
)
7716 /* The opcode is completed (modulo i.tm.extension_opcode which
7717 must be put into the modrm byte). Now, we make the modrm and
7718 index base bytes based on all the info we've collected. */
7720 default_seg
= build_modrm_byte ();
7722 else if (i
.types
[0].bitfield
.class == SReg
)
7724 if (flag_code
!= CODE_64BIT
7725 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7726 && i
.op
[0].regs
->reg_num
== 1
7727 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7728 && i
.op
[0].regs
->reg_num
< 4)
7730 as_bad (_("you can't `%s %s%s'"),
7731 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7734 if (i
.op
[0].regs
->reg_num
> 3
7735 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7737 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7738 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7740 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7742 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7743 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7745 default_seg
= reg_ds
;
7747 else if (i
.tm
.opcode_modifier
.isstring
)
7749 /* For the string instructions that allow a segment override
7750 on one of their operands, the default segment is ds. */
7751 default_seg
= reg_ds
;
7753 else if (i
.short_form
)
7755 /* The register or float register operand is in operand
7757 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7759 /* Register goes in low 3 bits of opcode. */
7760 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7761 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7763 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7765 /* Warn about some common errors, but press on regardless.
7766 The first case can be generated by gcc (<= 2.8.1). */
7767 if (i
.operands
== 2)
7769 /* Reversed arguments on faddp, fsubp, etc. */
7770 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7771 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7772 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7776 /* Extraneous `l' suffix on fp insn. */
7777 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7778 register_prefix
, i
.op
[0].regs
->reg_name
);
7783 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7784 && i
.tm
.base_opcode
== 0x8d /* lea */
7785 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7786 && !is_any_vex_encoding(&i
.tm
))
7788 if (!quiet_warnings
)
7789 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7793 i
.prefix
[SEG_PREFIX
] = 0;
7797 /* If a segment was explicitly specified, and the specified segment
7798 is neither the default nor the one already recorded from a prefix,
7799 use an opcode prefix to select it. If we never figured out what
7800 the default segment is, then default_seg will be zero at this
7801 point, and the specified segment prefix will always be used. */
7803 && i
.seg
[0] != default_seg
7804 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
7806 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
7812 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
7815 if (r
->reg_flags
& RegRex
)
7817 if (i
.rex
& rex_bit
)
7818 as_bad (_("same type of prefix used twice"));
7821 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
7823 gas_assert (i
.vex
.register_specifier
== r
);
7824 i
.vex
.register_specifier
+= 8;
7827 if (r
->reg_flags
& RegVRex
)
7831 static const reg_entry
*
7832 build_modrm_byte (void)
7834 const reg_entry
*default_seg
= NULL
;
7835 unsigned int source
, dest
;
7838 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7841 unsigned int nds
, reg_slot
;
7844 dest
= i
.operands
- 1;
7847 /* There are 2 kinds of instructions:
7848 1. 5 operands: 4 register operands or 3 register operands
7849 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7850 VexW0 or VexW1. The destination must be either XMM, YMM or
7852 2. 4 operands: 4 register operands or 3 register operands
7853 plus 1 memory operand, with VexXDS. */
7854 gas_assert ((i
.reg_operands
== 4
7855 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7856 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7857 && i
.tm
.opcode_modifier
.vexw
7858 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7860 /* If VexW1 is set, the first non-immediate operand is the source and
7861 the second non-immediate one is encoded in the immediate operand. */
7862 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7864 source
= i
.imm_operands
;
7865 reg_slot
= i
.imm_operands
+ 1;
7869 source
= i
.imm_operands
+ 1;
7870 reg_slot
= i
.imm_operands
;
7873 if (i
.imm_operands
== 0)
7875 /* When there is no immediate operand, generate an 8bit
7876 immediate operand to encode the first operand. */
7877 exp
= &im_expressions
[i
.imm_operands
++];
7878 i
.op
[i
.operands
].imms
= exp
;
7879 i
.types
[i
.operands
] = imm8
;
7882 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7883 exp
->X_op
= O_constant
;
7884 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7885 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7889 gas_assert (i
.imm_operands
== 1);
7890 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7891 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7893 /* Turn on Imm8 again so that output_imm will generate it. */
7894 i
.types
[0].bitfield
.imm8
= 1;
7896 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7897 i
.op
[0].imms
->X_add_number
7898 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7899 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7902 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7903 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7908 /* i.reg_operands MUST be the number of real register operands;
7909 implicit registers do not count. If there are 3 register
7910 operands, it must be a instruction with VexNDS. For a
7911 instruction with VexNDD, the destination register is encoded
7912 in VEX prefix. If there are 4 register operands, it must be
7913 a instruction with VEX prefix and 3 sources. */
7914 if (i
.mem_operands
== 0
7915 && ((i
.reg_operands
== 2
7916 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7917 || (i
.reg_operands
== 3
7918 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7919 || (i
.reg_operands
== 4 && vex_3_sources
)))
7927 /* When there are 3 operands, one of them may be immediate,
7928 which may be the first or the last operand. Otherwise,
7929 the first operand must be shift count register (cl) or it
7930 is an instruction with VexNDS. */
7931 gas_assert (i
.imm_operands
== 1
7932 || (i
.imm_operands
== 0
7933 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7934 || (i
.types
[0].bitfield
.instance
== RegC
7935 && i
.types
[0].bitfield
.byte
))));
7936 if (operand_type_check (i
.types
[0], imm
)
7937 || (i
.types
[0].bitfield
.instance
== RegC
7938 && i
.types
[0].bitfield
.byte
))
7944 /* When there are 4 operands, the first two must be 8bit
7945 immediate operands. The source operand will be the 3rd
7948 For instructions with VexNDS, if the first operand
7949 an imm8, the source operand is the 2nd one. If the last
7950 operand is imm8, the source operand is the first one. */
7951 gas_assert ((i
.imm_operands
== 2
7952 && i
.types
[0].bitfield
.imm8
7953 && i
.types
[1].bitfield
.imm8
)
7954 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7955 && i
.imm_operands
== 1
7956 && (i
.types
[0].bitfield
.imm8
7957 || i
.types
[i
.operands
- 1].bitfield
.imm8
7958 || i
.rounding
.type
!= rc_none
)));
7959 if (i
.imm_operands
== 2)
7963 if (i
.types
[0].bitfield
.imm8
)
7970 if (is_evex_encoding (&i
.tm
))
7972 /* For EVEX instructions, when there are 5 operands, the
7973 first one must be immediate operand. If the second one
7974 is immediate operand, the source operand is the 3th
7975 one. If the last one is immediate operand, the source
7976 operand is the 2nd one. */
7977 gas_assert (i
.imm_operands
== 2
7978 && i
.tm
.opcode_modifier
.sae
7979 && operand_type_check (i
.types
[0], imm
));
7980 if (operand_type_check (i
.types
[1], imm
))
7982 else if (operand_type_check (i
.types
[4], imm
))
7996 /* RC/SAE operand could be between DEST and SRC. That happens
7997 when one operand is GPR and the other one is XMM/YMM/ZMM
7999 if (i
.rounding
.type
!= rc_none
&& i
.rounding
.operand
== dest
)
8002 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8004 /* For instructions with VexNDS, the register-only source
8005 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8006 register. It is encoded in VEX prefix. */
8008 i386_operand_type op
;
8011 /* Swap two source operands if needed. */
8012 if (i
.tm
.opcode_modifier
.swapsources
)
8020 op
= i
.tm
.operand_types
[vvvv
];
8021 if ((dest
+ 1) >= i
.operands
8022 || ((op
.bitfield
.class != Reg
8023 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8024 && op
.bitfield
.class != RegSIMD
8025 && !operand_type_equal (&op
, ®mask
)))
8027 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8033 /* One of the register operands will be encoded in the i.rm.reg
8034 field, the other in the combined i.rm.mode and i.rm.regmem
8035 fields. If no form of this instruction supports a memory
8036 destination operand, then we assume the source operand may
8037 sometimes be a memory operand and so we need to store the
8038 destination in the i.rm.reg field. */
8039 if (!i
.tm
.opcode_modifier
.regmem
8040 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8042 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8043 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8044 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8045 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8049 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8050 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8051 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8052 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8054 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8056 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8059 add_prefix (LOCK_PREFIX_OPCODE
);
8063 { /* If it's not 2 reg operands... */
8068 unsigned int fake_zero_displacement
= 0;
8071 for (op
= 0; op
< i
.operands
; op
++)
8072 if (i
.flags
[op
] & Operand_Mem
)
8074 gas_assert (op
< i
.operands
);
8076 if (i
.tm
.opcode_modifier
.sib
)
8078 /* The index register of VSIB shouldn't be RegIZ. */
8079 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8080 && i
.index_reg
->reg_num
== RegIZ
)
8083 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8086 i
.sib
.base
= NO_BASE_REGISTER
;
8087 i
.sib
.scale
= i
.log2_scale_factor
;
8088 i
.types
[op
].bitfield
.disp8
= 0;
8089 i
.types
[op
].bitfield
.disp16
= 0;
8090 i
.types
[op
].bitfield
.disp64
= 0;
8091 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
8093 /* Must be 32 bit */
8094 i
.types
[op
].bitfield
.disp32
= 1;
8095 i
.types
[op
].bitfield
.disp32s
= 0;
8099 i
.types
[op
].bitfield
.disp32
= 0;
8100 i
.types
[op
].bitfield
.disp32s
= 1;
8104 /* Since the mandatory SIB always has index register, so
8105 the code logic remains unchanged. The non-mandatory SIB
8106 without index register is allowed and will be handled
8110 if (i
.index_reg
->reg_num
== RegIZ
)
8111 i
.sib
.index
= NO_INDEX_REGISTER
;
8113 i
.sib
.index
= i
.index_reg
->reg_num
;
8114 set_rex_vrex (i
.index_reg
, REX_X
, false);
8118 default_seg
= reg_ds
;
8120 if (i
.base_reg
== 0)
8123 if (!i
.disp_operands
)
8124 fake_zero_displacement
= 1;
8125 if (i
.index_reg
== 0)
8127 i386_operand_type newdisp
;
8129 /* Both check for VSIB and mandatory non-vector SIB. */
8130 gas_assert (!i
.tm
.opcode_modifier
.sib
8131 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8132 /* Operand is just <disp> */
8133 if (flag_code
== CODE_64BIT
)
8135 /* 64bit mode overwrites the 32bit absolute
8136 addressing by RIP relative addressing and
8137 absolute addressing is encoded by one of the
8138 redundant SIB forms. */
8139 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8140 i
.sib
.base
= NO_BASE_REGISTER
;
8141 i
.sib
.index
= NO_INDEX_REGISTER
;
8142 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
8144 else if ((flag_code
== CODE_16BIT
)
8145 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8147 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8152 i
.rm
.regmem
= NO_BASE_REGISTER
;
8155 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8156 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
8158 else if (!i
.tm
.opcode_modifier
.sib
)
8160 /* !i.base_reg && i.index_reg */
8161 if (i
.index_reg
->reg_num
== RegIZ
)
8162 i
.sib
.index
= NO_INDEX_REGISTER
;
8164 i
.sib
.index
= i
.index_reg
->reg_num
;
8165 i
.sib
.base
= NO_BASE_REGISTER
;
8166 i
.sib
.scale
= i
.log2_scale_factor
;
8167 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8168 i
.types
[op
].bitfield
.disp8
= 0;
8169 i
.types
[op
].bitfield
.disp16
= 0;
8170 i
.types
[op
].bitfield
.disp64
= 0;
8171 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
8173 /* Must be 32 bit */
8174 i
.types
[op
].bitfield
.disp32
= 1;
8175 i
.types
[op
].bitfield
.disp32s
= 0;
8179 i
.types
[op
].bitfield
.disp32
= 0;
8180 i
.types
[op
].bitfield
.disp32s
= 1;
8182 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8186 /* RIP addressing for 64bit mode. */
8187 else if (i
.base_reg
->reg_num
== RegIP
)
8189 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8190 i
.rm
.regmem
= NO_BASE_REGISTER
;
8191 i
.types
[op
].bitfield
.disp8
= 0;
8192 i
.types
[op
].bitfield
.disp16
= 0;
8193 i
.types
[op
].bitfield
.disp32
= 0;
8194 i
.types
[op
].bitfield
.disp32s
= 1;
8195 i
.types
[op
].bitfield
.disp64
= 0;
8196 i
.flags
[op
] |= Operand_PCrel
;
8197 if (! i
.disp_operands
)
8198 fake_zero_displacement
= 1;
8200 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8202 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8203 switch (i
.base_reg
->reg_num
)
8206 if (i
.index_reg
== 0)
8208 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8209 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8212 default_seg
= reg_ss
;
8213 if (i
.index_reg
== 0)
8216 if (operand_type_check (i
.types
[op
], disp
) == 0)
8218 /* fake (%bp) into 0(%bp) */
8219 if (i
.disp_encoding
== disp_encoding_16bit
)
8220 i
.types
[op
].bitfield
.disp16
= 1;
8222 i
.types
[op
].bitfield
.disp8
= 1;
8223 fake_zero_displacement
= 1;
8226 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8227 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8229 default: /* (%si) -> 4 or (%di) -> 5 */
8230 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8232 if (!fake_zero_displacement
8236 fake_zero_displacement
= 1;
8237 if (i
.disp_encoding
== disp_encoding_8bit
)
8238 i
.types
[op
].bitfield
.disp8
= 1;
8240 i
.types
[op
].bitfield
.disp16
= 1;
8242 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8244 else /* i.base_reg and 32/64 bit mode */
8246 if (flag_code
== CODE_64BIT
8247 && operand_type_check (i
.types
[op
], disp
))
8249 i
.types
[op
].bitfield
.disp16
= 0;
8250 i
.types
[op
].bitfield
.disp64
= 0;
8251 if (i
.prefix
[ADDR_PREFIX
] == 0)
8253 i
.types
[op
].bitfield
.disp32
= 0;
8254 i
.types
[op
].bitfield
.disp32s
= 1;
8258 i
.types
[op
].bitfield
.disp32
= 1;
8259 i
.types
[op
].bitfield
.disp32s
= 0;
8263 if (!i
.tm
.opcode_modifier
.sib
)
8264 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8265 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8267 i
.sib
.base
= i
.base_reg
->reg_num
;
8268 /* x86-64 ignores REX prefix bit here to avoid decoder
8270 if (!(i
.base_reg
->reg_flags
& RegRex
)
8271 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8272 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8273 default_seg
= reg_ss
;
8274 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8276 fake_zero_displacement
= 1;
8277 if (i
.disp_encoding
== disp_encoding_32bit
)
8278 i
.types
[op
].bitfield
.disp32
= 1;
8280 i
.types
[op
].bitfield
.disp8
= 1;
8282 i
.sib
.scale
= i
.log2_scale_factor
;
8283 if (i
.index_reg
== 0)
8285 /* Only check for VSIB. */
8286 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8287 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8288 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8290 /* <disp>(%esp) becomes two byte modrm with no index
8291 register. We've already stored the code for esp
8292 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8293 Any base register besides %esp will not use the
8294 extra modrm byte. */
8295 i
.sib
.index
= NO_INDEX_REGISTER
;
8297 else if (!i
.tm
.opcode_modifier
.sib
)
8299 if (i
.index_reg
->reg_num
== RegIZ
)
8300 i
.sib
.index
= NO_INDEX_REGISTER
;
8302 i
.sib
.index
= i
.index_reg
->reg_num
;
8303 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8304 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8309 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8310 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8314 if (!fake_zero_displacement
8318 fake_zero_displacement
= 1;
8319 if (i
.disp_encoding
== disp_encoding_8bit
)
8320 i
.types
[op
].bitfield
.disp8
= 1;
8322 i
.types
[op
].bitfield
.disp32
= 1;
8324 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8328 if (fake_zero_displacement
)
8330 /* Fakes a zero displacement assuming that i.types[op]
8331 holds the correct displacement size. */
8334 gas_assert (i
.op
[op
].disps
== 0);
8335 exp
= &disp_expressions
[i
.disp_operands
++];
8336 i
.op
[op
].disps
= exp
;
8337 exp
->X_op
= O_constant
;
8338 exp
->X_add_number
= 0;
8339 exp
->X_add_symbol
= (symbolS
*) 0;
8340 exp
->X_op_symbol
= (symbolS
*) 0;
8348 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8350 if (operand_type_check (i
.types
[0], imm
))
8351 i
.vex
.register_specifier
= NULL
;
8354 /* VEX.vvvv encodes one of the sources when the first
8355 operand is not an immediate. */
8356 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8357 i
.vex
.register_specifier
= i
.op
[0].regs
;
8359 i
.vex
.register_specifier
= i
.op
[1].regs
;
8362 /* Destination is a XMM register encoded in the ModRM.reg
8364 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8365 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8368 /* ModRM.rm and VEX.B encodes the other source. */
8369 if (!i
.mem_operands
)
8373 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8374 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8376 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8378 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8382 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8384 i
.vex
.register_specifier
= i
.op
[2].regs
;
8385 if (!i
.mem_operands
)
8388 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8389 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8393 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8394 (if any) based on i.tm.extension_opcode. Again, we must be
8395 careful to make sure that segment/control/debug/test/MMX
8396 registers are coded into the i.rm.reg field. */
8397 else if (i
.reg_operands
)
8400 unsigned int vex_reg
= ~0;
8402 for (op
= 0; op
< i
.operands
; op
++)
8403 if (i
.types
[op
].bitfield
.class == Reg
8404 || i
.types
[op
].bitfield
.class == RegBND
8405 || i
.types
[op
].bitfield
.class == RegMask
8406 || i
.types
[op
].bitfield
.class == SReg
8407 || i
.types
[op
].bitfield
.class == RegCR
8408 || i
.types
[op
].bitfield
.class == RegDR
8409 || i
.types
[op
].bitfield
.class == RegTR
8410 || i
.types
[op
].bitfield
.class == RegSIMD
8411 || i
.types
[op
].bitfield
.class == RegMMX
)
8416 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8418 /* For instructions with VexNDS, the register-only
8419 source operand is encoded in VEX prefix. */
8420 gas_assert (mem
!= (unsigned int) ~0);
8425 gas_assert (op
< i
.operands
);
8429 /* Check register-only source operand when two source
8430 operands are swapped. */
8431 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8432 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8436 gas_assert (mem
== (vex_reg
+ 1)
8437 && op
< i
.operands
);
8442 gas_assert (vex_reg
< i
.operands
);
8446 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8448 /* For instructions with VexNDD, the register destination
8449 is encoded in VEX prefix. */
8450 if (i
.mem_operands
== 0)
8452 /* There is no memory operand. */
8453 gas_assert ((op
+ 2) == i
.operands
);
8458 /* There are only 2 non-immediate operands. */
8459 gas_assert (op
< i
.imm_operands
+ 2
8460 && i
.operands
== i
.imm_operands
+ 2);
8461 vex_reg
= i
.imm_operands
+ 1;
8465 gas_assert (op
< i
.operands
);
8467 if (vex_reg
!= (unsigned int) ~0)
8469 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8471 if ((type
->bitfield
.class != Reg
8472 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8473 && type
->bitfield
.class != RegSIMD
8474 && !operand_type_equal (type
, ®mask
))
8477 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8480 /* Don't set OP operand twice. */
8483 /* If there is an extension opcode to put here, the
8484 register number must be put into the regmem field. */
8485 if (i
.tm
.extension_opcode
!= None
)
8487 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8488 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8489 i
.tm
.opcode_modifier
.sse2avx
);
8493 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8494 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8495 i
.tm
.opcode_modifier
.sse2avx
);
8499 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8500 must set it to 3 to indicate this is a register operand
8501 in the regmem field. */
8502 if (!i
.mem_operands
)
8506 /* Fill in i.rm.reg field with extension opcode (if any). */
8507 if (i
.tm
.extension_opcode
!= None
)
8508 i
.rm
.reg
= i
.tm
.extension_opcode
;
8514 frag_opcode_byte (unsigned char byte
)
8516 if (now_seg
!= absolute_section
)
8517 FRAG_APPEND_1_CHAR (byte
);
8519 ++abs_section_offset
;
8523 flip_code16 (unsigned int code16
)
8525 gas_assert (i
.tm
.operands
== 1);
8527 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8528 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8529 || i
.tm
.operand_types
[0].bitfield
.disp32s
8530 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8535 output_branch (void)
8541 relax_substateT subtype
;
8545 if (now_seg
== absolute_section
)
8547 as_bad (_("relaxable branches not supported in absolute section"));
8551 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8552 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
8555 if (i
.prefix
[DATA_PREFIX
] != 0)
8559 code16
^= flip_code16(code16
);
8561 /* Pentium4 branch hints. */
8562 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8563 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8568 if (i
.prefix
[REX_PREFIX
] != 0)
8574 /* BND prefixed jump. */
8575 if (i
.prefix
[BND_PREFIX
] != 0)
8581 if (i
.prefixes
!= 0)
8582 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8584 /* It's always a symbol; End frag & setup for relax.
8585 Make sure there is enough room in this frag for the largest
8586 instruction we may generate in md_convert_frag. This is 2
8587 bytes for the opcode and room for the prefix and largest
8589 frag_grow (prefix
+ 2 + 4);
8590 /* Prefix and 1 opcode byte go in fr_fix. */
8591 p
= frag_more (prefix
+ 1);
8592 if (i
.prefix
[DATA_PREFIX
] != 0)
8593 *p
++ = DATA_PREFIX_OPCODE
;
8594 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8595 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8596 *p
++ = i
.prefix
[SEG_PREFIX
];
8597 if (i
.prefix
[BND_PREFIX
] != 0)
8598 *p
++ = BND_PREFIX_OPCODE
;
8599 if (i
.prefix
[REX_PREFIX
] != 0)
8600 *p
++ = i
.prefix
[REX_PREFIX
];
8601 *p
= i
.tm
.base_opcode
;
8603 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8604 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8605 else if (cpu_arch_flags
.bitfield
.cpui386
)
8606 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8608 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8611 sym
= i
.op
[0].disps
->X_add_symbol
;
8612 off
= i
.op
[0].disps
->X_add_number
;
8614 if (i
.op
[0].disps
->X_op
!= O_constant
8615 && i
.op
[0].disps
->X_op
!= O_symbol
)
8617 /* Handle complex expressions. */
8618 sym
= make_expr_symbol (i
.op
[0].disps
);
8622 /* 1 possible extra opcode + 4 byte displacement go in var part.
8623 Pass reloc in fr_var. */
8624 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8627 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8628 /* Return TRUE iff PLT32 relocation should be used for branching to
8632 need_plt32_p (symbolS
*s
)
8634 /* PLT32 relocation is ELF only. */
8639 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8640 krtld support it. */
8644 /* Since there is no need to prepare for PLT branch on x86-64, we
8645 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8646 be used as a marker for 32-bit PC-relative branches. */
8653 /* Weak or undefined symbol need PLT32 relocation. */
8654 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8657 /* Non-global symbol doesn't need PLT32 relocation. */
8658 if (! S_IS_EXTERNAL (s
))
8661 /* Other global symbols need PLT32 relocation. NB: Symbol with
8662 non-default visibilities are treated as normal global symbol
8663 so that PLT32 relocation can be used as a marker for 32-bit
8664 PC-relative branches. It is useful for linker relaxation. */
8675 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8677 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8679 /* This is a loop or jecxz type instruction. */
8681 if (i
.prefix
[ADDR_PREFIX
] != 0)
8683 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8686 /* Pentium4 branch hints. */
8687 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8688 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8690 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8699 if (flag_code
== CODE_16BIT
)
8702 if (i
.prefix
[DATA_PREFIX
] != 0)
8704 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8706 code16
^= flip_code16(code16
);
8714 /* BND prefixed jump. */
8715 if (i
.prefix
[BND_PREFIX
] != 0)
8717 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8721 if (i
.prefix
[REX_PREFIX
] != 0)
8723 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8727 if (i
.prefixes
!= 0)
8728 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8730 if (now_seg
== absolute_section
)
8732 abs_section_offset
+= i
.opcode_length
+ size
;
8736 p
= frag_more (i
.opcode_length
+ size
);
8737 switch (i
.opcode_length
)
8740 *p
++ = i
.tm
.base_opcode
>> 8;
8743 *p
++ = i
.tm
.base_opcode
;
8749 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8751 && jump_reloc
== NO_RELOC
8752 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8753 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8756 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8758 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8759 i
.op
[0].disps
, 1, jump_reloc
);
8761 /* All jumps handled here are signed, but don't use a signed limit
8762 check for 32 and 16 bit jumps as we want to allow wrap around at
8763 4G and 64k respectively. */
8765 fixP
->fx_signed
= 1;
8769 output_interseg_jump (void)
8777 if (flag_code
== CODE_16BIT
)
8781 if (i
.prefix
[DATA_PREFIX
] != 0)
8788 gas_assert (!i
.prefix
[REX_PREFIX
]);
8794 if (i
.prefixes
!= 0)
8795 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8797 if (now_seg
== absolute_section
)
8799 abs_section_offset
+= prefix
+ 1 + 2 + size
;
8803 /* 1 opcode; 2 segment; offset */
8804 p
= frag_more (prefix
+ 1 + 2 + size
);
8806 if (i
.prefix
[DATA_PREFIX
] != 0)
8807 *p
++ = DATA_PREFIX_OPCODE
;
8809 if (i
.prefix
[REX_PREFIX
] != 0)
8810 *p
++ = i
.prefix
[REX_PREFIX
];
8812 *p
++ = i
.tm
.base_opcode
;
8813 if (i
.op
[1].imms
->X_op
== O_constant
)
8815 offsetT n
= i
.op
[1].imms
->X_add_number
;
8818 && !fits_in_unsigned_word (n
)
8819 && !fits_in_signed_word (n
))
8821 as_bad (_("16-bit jump out of range"));
8824 md_number_to_chars (p
, n
, size
);
8827 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8828 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8831 if (i
.op
[0].imms
->X_op
== O_constant
)
8832 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8834 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
8835 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
8838 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8843 asection
*seg
= now_seg
;
8844 subsegT subseg
= now_subseg
;
8846 unsigned int alignment
, align_size_1
;
8847 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8848 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8849 unsigned int padding
;
8851 if (!IS_ELF
|| !x86_used_note
)
8854 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8856 /* The .note.gnu.property section layout:
8858 Field Length Contents
8861 n_descsz 4 The note descriptor size
8862 n_type 4 NT_GNU_PROPERTY_TYPE_0
8864 n_desc n_descsz The program property array
8868 /* Create the .note.gnu.property section. */
8869 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8870 bfd_set_section_flags (sec
,
8877 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8888 bfd_set_section_alignment (sec
, alignment
);
8889 elf_section_type (sec
) = SHT_NOTE
;
8891 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8893 isa_1_descsz_raw
= 4 + 4 + 4;
8894 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8895 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8897 feature_2_descsz_raw
= isa_1_descsz
;
8898 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8900 feature_2_descsz_raw
+= 4 + 4 + 4;
8901 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8902 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8905 descsz
= feature_2_descsz
;
8906 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8907 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8909 /* Write n_namsz. */
8910 md_number_to_chars (p
, (valueT
) 4, 4);
8912 /* Write n_descsz. */
8913 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8916 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8919 memcpy (p
+ 4 * 3, "GNU", 4);
8921 /* Write 4-byte type. */
8922 md_number_to_chars (p
+ 4 * 4,
8923 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8925 /* Write 4-byte data size. */
8926 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8928 /* Write 4-byte data. */
8929 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8931 /* Zero out paddings. */
8932 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8934 memset (p
+ 4 * 7, 0, padding
);
8936 /* Write 4-byte type. */
8937 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8938 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8940 /* Write 4-byte data size. */
8941 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8943 /* Write 4-byte data. */
8944 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8945 (valueT
) x86_feature_2_used
, 4);
8947 /* Zero out paddings. */
8948 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8950 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8952 /* We probably can't restore the current segment, for there likely
8955 subseg_set (seg
, subseg
);
8960 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8961 const char *frag_now_ptr
)
8963 unsigned int len
= 0;
8965 if (start_frag
!= frag_now
)
8967 const fragS
*fr
= start_frag
;
8972 } while (fr
&& fr
!= frag_now
);
8975 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8978 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8979 be macro-fused with conditional jumps.
8980 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
8981 or is one of the following format:
8994 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
8996 /* No RIP address. */
8997 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9000 /* No opcodes outside of base encoding space. */
9001 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9004 /* add, sub without add/sub m, imm. */
9005 if (i
.tm
.base_opcode
<= 5
9006 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9007 || ((i
.tm
.base_opcode
| 3) == 0x83
9008 && (i
.tm
.extension_opcode
== 0x5
9009 || i
.tm
.extension_opcode
== 0x0)))
9011 *mf_cmp_p
= mf_cmp_alu_cmp
;
9012 return !(i
.mem_operands
&& i
.imm_operands
);
9015 /* and without and m, imm. */
9016 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9017 || ((i
.tm
.base_opcode
| 3) == 0x83
9018 && i
.tm
.extension_opcode
== 0x4))
9020 *mf_cmp_p
= mf_cmp_test_and
;
9021 return !(i
.mem_operands
&& i
.imm_operands
);
9024 /* test without test m imm. */
9025 if ((i
.tm
.base_opcode
| 1) == 0x85
9026 || (i
.tm
.base_opcode
| 1) == 0xa9
9027 || ((i
.tm
.base_opcode
| 1) == 0xf7
9028 && i
.tm
.extension_opcode
== 0))
9030 *mf_cmp_p
= mf_cmp_test_and
;
9031 return !(i
.mem_operands
&& i
.imm_operands
);
9034 /* cmp without cmp m, imm. */
9035 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9036 || ((i
.tm
.base_opcode
| 3) == 0x83
9037 && (i
.tm
.extension_opcode
== 0x7)))
9039 *mf_cmp_p
= mf_cmp_alu_cmp
;
9040 return !(i
.mem_operands
&& i
.imm_operands
);
9043 /* inc, dec without inc/dec m. */
9044 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9045 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9046 || ((i
.tm
.base_opcode
| 1) == 0xff
9047 && i
.tm
.extension_opcode
<= 0x1))
9049 *mf_cmp_p
= mf_cmp_incdec
;
9050 return !i
.mem_operands
;
9056 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9059 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9061 /* NB: Don't work with COND_JUMP86 without i386. */
9062 if (!align_branch_power
9063 || now_seg
== absolute_section
9064 || !cpu_arch_flags
.bitfield
.cpui386
9065 || !(align_branch
& align_branch_fused_bit
))
9068 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9070 if (last_insn
.kind
== last_insn_other
9071 || last_insn
.seg
!= now_seg
)
9074 as_warn_where (last_insn
.file
, last_insn
.line
,
9075 _("`%s` skips -malign-branch-boundary on `%s`"),
9076 last_insn
.name
, i
.tm
.name
);
9082 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9085 add_branch_prefix_frag_p (void)
9087 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9088 to PadLock instructions since they include prefixes in opcode. */
9089 if (!align_branch_power
9090 || !align_branch_prefix_size
9091 || now_seg
== absolute_section
9092 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9093 || !cpu_arch_flags
.bitfield
.cpui386
)
9096 /* Don't add prefix if it is a prefix or there is no operand in case
9097 that segment prefix is special. */
9098 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9101 if (last_insn
.kind
== last_insn_other
9102 || last_insn
.seg
!= now_seg
)
9106 as_warn_where (last_insn
.file
, last_insn
.line
,
9107 _("`%s` skips -malign-branch-boundary on `%s`"),
9108 last_insn
.name
, i
.tm
.name
);
9113 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9116 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9117 enum mf_jcc_kind
*mf_jcc_p
)
9121 /* NB: Don't work with COND_JUMP86 without i386. */
9122 if (!align_branch_power
9123 || now_seg
== absolute_section
9124 || !cpu_arch_flags
.bitfield
.cpui386
9125 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9130 /* Check for jcc and direct jmp. */
9131 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9133 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9135 *branch_p
= align_branch_jmp
;
9136 add_padding
= align_branch
& align_branch_jmp_bit
;
9140 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9141 igore the lowest bit. */
9142 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9143 *branch_p
= align_branch_jcc
;
9144 if ((align_branch
& align_branch_jcc_bit
))
9148 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9151 *branch_p
= align_branch_ret
;
9152 if ((align_branch
& align_branch_ret_bit
))
9157 /* Check for indirect jmp, direct and indirect calls. */
9158 if (i
.tm
.base_opcode
== 0xe8)
9161 *branch_p
= align_branch_call
;
9162 if ((align_branch
& align_branch_call_bit
))
9165 else if (i
.tm
.base_opcode
== 0xff
9166 && (i
.tm
.extension_opcode
== 2
9167 || i
.tm
.extension_opcode
== 4))
9169 /* Indirect call and jmp. */
9170 *branch_p
= align_branch_indirect
;
9171 if ((align_branch
& align_branch_indirect_bit
))
9178 && (i
.op
[0].disps
->X_op
== O_symbol
9179 || (i
.op
[0].disps
->X_op
== O_subtract
9180 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9182 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9183 /* No padding to call to global or undefined tls_get_addr. */
9184 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9185 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9191 && last_insn
.kind
!= last_insn_other
9192 && last_insn
.seg
== now_seg
)
9195 as_warn_where (last_insn
.file
, last_insn
.line
,
9196 _("`%s` skips -malign-branch-boundary on `%s`"),
9197 last_insn
.name
, i
.tm
.name
);
9207 fragS
*insn_start_frag
;
9208 offsetT insn_start_off
;
9209 fragS
*fragP
= NULL
;
9210 enum align_branch_kind branch
= align_branch_none
;
9211 /* The initializer is arbitrary just to avoid uninitialized error.
9212 it's actually either assigned in add_branch_padding_frag_p
9213 or never be used. */
9214 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9217 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9219 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9220 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9221 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9223 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9224 || i
.tm
.cpu_flags
.bitfield
.cpu287
9225 || i
.tm
.cpu_flags
.bitfield
.cpu387
9226 || i
.tm
.cpu_flags
.bitfield
.cpu687
9227 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9228 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9230 if ((i
.xstate
& xstate_mmx
)
9231 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9232 && !is_any_vex_encoding (&i
.tm
)
9233 && (i
.tm
.base_opcode
== 0x77 /* emms */
9234 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9235 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9239 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9240 i
.xstate
|= xstate_zmm
;
9241 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9242 i
.xstate
|= xstate_ymm
;
9243 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9244 i
.xstate
|= xstate_xmm
;
9247 /* vzeroall / vzeroupper */
9248 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9249 i
.xstate
|= xstate_ymm
;
9251 if ((i
.xstate
& xstate_xmm
)
9252 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9253 || (i
.tm
.base_opcode
== 0xae
9254 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9255 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9256 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9257 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9258 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9260 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9261 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9262 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9263 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9264 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9265 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9266 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9267 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9268 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9269 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9270 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9271 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9272 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9273 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9275 if (x86_feature_2_used
9276 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9277 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9278 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9279 && i
.tm
.base_opcode
== 0xc7
9280 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9281 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9282 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9283 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9284 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9285 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9286 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9287 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9288 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9289 /* LAHF-SAHF insns in 64-bit mode. */
9290 || (flag_code
== CODE_64BIT
9291 && (i
.tm
.base_opcode
| 1) == 0x9f
9292 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9293 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9294 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9295 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9296 /* Any VEX encoded insns execpt for CpuAVX512F, CpuAVX512BW,
9297 CpuAVX512DQ, LPW, TBM and AMX. */
9298 || (i
.tm
.opcode_modifier
.vex
9299 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9300 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9301 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9302 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9303 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9304 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9305 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9306 || i
.tm
.cpu_flags
.bitfield
.cpufma
9307 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9308 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9309 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9310 || (x86_feature_2_used
9311 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9312 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9313 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9314 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9315 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9316 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9317 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9318 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9319 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9321 || (i
.tm
.opcode_modifier
.evex
9322 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9323 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9324 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9325 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9329 /* Tie dwarf2 debug info to the address at the start of the insn.
9330 We can't do this after the insn has been output as the current
9331 frag may have been closed off. eg. by frag_var. */
9332 dwarf2_emit_insn (0);
9334 insn_start_frag
= frag_now
;
9335 insn_start_off
= frag_now_fix ();
9337 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9340 /* Branch can be 8 bytes. Leave some room for prefixes. */
9341 unsigned int max_branch_padding_size
= 14;
9343 /* Align section to boundary. */
9344 record_alignment (now_seg
, align_branch_power
);
9346 /* Make room for padding. */
9347 frag_grow (max_branch_padding_size
);
9349 /* Start of the padding. */
9354 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9355 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9358 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9359 fragP
->tc_frag_data
.branch_type
= branch
;
9360 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9364 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9366 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9367 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9369 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9370 output_interseg_jump ();
9373 /* Output normal instructions here. */
9377 enum mf_cmp_kind mf_cmp
;
9380 && (i
.tm
.base_opcode
== 0xaee8
9381 || i
.tm
.base_opcode
== 0xaef0
9382 || i
.tm
.base_opcode
== 0xaef8))
9384 /* Encode lfence, mfence, and sfence as
9385 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9386 if (now_seg
!= absolute_section
)
9388 offsetT val
= 0x240483f0ULL
;
9391 md_number_to_chars (p
, val
, 5);
9394 abs_section_offset
+= 5;
9398 /* Some processors fail on LOCK prefix. This options makes
9399 assembler ignore LOCK prefix and serves as a workaround. */
9400 if (omit_lock_prefix
)
9402 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9403 && i
.tm
.opcode_modifier
.isprefix
)
9405 i
.prefix
[LOCK_PREFIX
] = 0;
9409 /* Skip if this is a branch. */
9411 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9413 /* Make room for padding. */
9414 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9419 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9420 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9423 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9424 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9425 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9427 else if (add_branch_prefix_frag_p ())
9429 unsigned int max_prefix_size
= align_branch_prefix_size
;
9431 /* Make room for padding. */
9432 frag_grow (max_prefix_size
);
9437 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9438 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9441 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9444 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9445 don't need the explicit prefix. */
9446 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9448 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9457 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9458 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9462 switch (i
.opcode_length
)
9467 /* Check for pseudo prefixes. */
9468 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9470 as_bad_where (insn_start_frag
->fr_file
,
9471 insn_start_frag
->fr_line
,
9472 _("pseudo prefix without instruction"));
9482 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9483 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9484 R_X86_64_GOTTPOFF relocation so that linker can safely
9485 perform IE->LE optimization. A dummy REX_OPCODE prefix
9486 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9487 relocation for GDesc -> IE/LE optimization. */
9488 if (x86_elf_abi
== X86_64_X32_ABI
9490 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9491 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9492 && i
.prefix
[REX_PREFIX
] == 0)
9493 add_prefix (REX_OPCODE
);
9496 /* The prefix bytes. */
9497 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9499 frag_opcode_byte (*q
);
9503 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9509 frag_opcode_byte (*q
);
9512 /* There should be no other prefixes for instructions
9517 /* For EVEX instructions i.vrex should become 0 after
9518 build_evex_prefix. For VEX instructions upper 16 registers
9519 aren't available, so VREX should be 0. */
9522 /* Now the VEX prefix. */
9523 if (now_seg
!= absolute_section
)
9525 p
= frag_more (i
.vex
.length
);
9526 for (j
= 0; j
< i
.vex
.length
; j
++)
9527 p
[j
] = i
.vex
.bytes
[j
];
9530 abs_section_offset
+= i
.vex
.length
;
9533 /* Now the opcode; be careful about word order here! */
9534 j
= i
.opcode_length
;
9536 switch (i
.tm
.opcode_modifier
.opcodespace
)
9551 if (now_seg
== absolute_section
)
9552 abs_section_offset
+= j
;
9555 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9561 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9564 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9565 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9569 switch (i
.opcode_length
)
9572 /* Put out high byte first: can't use md_number_to_chars! */
9573 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9576 *p
= i
.tm
.base_opcode
& 0xff;
9585 /* Now the modrm byte and sib byte (if present). */
9586 if (i
.tm
.opcode_modifier
.modrm
)
9588 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9590 | (i
.rm
.mode
<< 6));
9591 /* If i.rm.regmem == ESP (4)
9592 && i.rm.mode != (Register mode)
9594 ==> need second modrm byte. */
9595 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9597 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9598 frag_opcode_byte ((i
.sib
.base
<< 0)
9599 | (i
.sib
.index
<< 3)
9600 | (i
.sib
.scale
<< 6));
9603 if (i
.disp_operands
)
9604 output_disp (insn_start_frag
, insn_start_off
);
9607 output_imm (insn_start_frag
, insn_start_off
);
9610 * frag_now_fix () returning plain abs_section_offset when we're in the
9611 * absolute section, and abs_section_offset not getting updated as data
9612 * gets added to the frag breaks the logic below.
9614 if (now_seg
!= absolute_section
)
9616 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9618 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9622 /* NB: Don't add prefix with GOTPC relocation since
9623 output_disp() above depends on the fixed encoding
9624 length. Can't add prefix with TLS relocation since
9625 it breaks TLS linker optimization. */
9626 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9627 /* Prefix count on the current instruction. */
9628 unsigned int count
= i
.vex
.length
;
9630 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9631 /* REX byte is encoded in VEX/EVEX prefix. */
9632 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9635 /* Count prefixes for extended opcode maps. */
9637 switch (i
.tm
.opcode_modifier
.opcodespace
)
9652 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9655 /* Set the maximum prefix size in BRANCH_PREFIX
9657 if (fragP
->tc_frag_data
.max_bytes
> max
)
9658 fragP
->tc_frag_data
.max_bytes
= max
;
9659 if (fragP
->tc_frag_data
.max_bytes
> count
)
9660 fragP
->tc_frag_data
.max_bytes
-= count
;
9662 fragP
->tc_frag_data
.max_bytes
= 0;
9666 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9668 unsigned int max_prefix_size
;
9669 if (align_branch_prefix_size
> max
)
9670 max_prefix_size
= max
;
9672 max_prefix_size
= align_branch_prefix_size
;
9673 if (max_prefix_size
> count
)
9674 fragP
->tc_frag_data
.max_prefix_length
9675 = max_prefix_size
- count
;
9678 /* Use existing segment prefix if possible. Use CS
9679 segment prefix in 64-bit mode. In 32-bit mode, use SS
9680 segment prefix with ESP/EBP base register and use DS
9681 segment prefix without ESP/EBP base register. */
9682 if (i
.prefix
[SEG_PREFIX
])
9683 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9684 else if (flag_code
== CODE_64BIT
)
9685 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9687 && (i
.base_reg
->reg_num
== 4
9688 || i
.base_reg
->reg_num
== 5))
9689 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9691 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9696 /* NB: Don't work with COND_JUMP86 without i386. */
9697 if (align_branch_power
9698 && now_seg
!= absolute_section
9699 && cpu_arch_flags
.bitfield
.cpui386
)
9701 /* Terminate each frag so that we can add prefix and check for
9703 frag_wane (frag_now
);
9710 pi ("" /*line*/, &i
);
9712 #endif /* DEBUG386 */
9715 /* Return the size of the displacement operand N. */
9718 disp_size (unsigned int n
)
9722 if (i
.types
[n
].bitfield
.disp64
)
9724 else if (i
.types
[n
].bitfield
.disp8
)
9726 else if (i
.types
[n
].bitfield
.disp16
)
9731 /* Return the size of the immediate operand N. */
9734 imm_size (unsigned int n
)
9737 if (i
.types
[n
].bitfield
.imm64
)
9739 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9741 else if (i
.types
[n
].bitfield
.imm16
)
9747 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9752 for (n
= 0; n
< i
.operands
; n
++)
9754 if (operand_type_check (i
.types
[n
], disp
))
9756 int size
= disp_size (n
);
9758 if (now_seg
== absolute_section
)
9759 abs_section_offset
+= size
;
9760 else if (i
.op
[n
].disps
->X_op
== O_constant
)
9762 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9764 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9766 p
= frag_more (size
);
9767 md_number_to_chars (p
, val
, size
);
9771 enum bfd_reloc_code_real reloc_type
;
9772 int sign
= i
.types
[n
].bitfield
.disp32s
;
9773 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9776 /* We can't have 8 bit displacement here. */
9777 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9779 /* The PC relative address is computed relative
9780 to the instruction boundary, so in case immediate
9781 fields follows, we need to adjust the value. */
9782 if (pcrel
&& i
.imm_operands
)
9787 for (n1
= 0; n1
< i
.operands
; n1
++)
9788 if (operand_type_check (i
.types
[n1
], imm
))
9790 /* Only one immediate is allowed for PC
9791 relative address. */
9792 gas_assert (sz
== 0);
9794 i
.op
[n
].disps
->X_add_number
-= sz
;
9796 /* We should find the immediate. */
9797 gas_assert (sz
!= 0);
9800 p
= frag_more (size
);
9801 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9803 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9804 && (((reloc_type
== BFD_RELOC_32
9805 || reloc_type
== BFD_RELOC_X86_64_32S
9806 || (reloc_type
== BFD_RELOC_64
9808 && (i
.op
[n
].disps
->X_op
== O_symbol
9809 || (i
.op
[n
].disps
->X_op
== O_add
9810 && ((symbol_get_value_expression
9811 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9813 || reloc_type
== BFD_RELOC_32_PCREL
))
9817 reloc_type
= BFD_RELOC_386_GOTPC
;
9818 i
.has_gotpc_tls_reloc
= true;
9819 i
.op
[n
].imms
->X_add_number
+=
9820 encoding_length (insn_start_frag
, insn_start_off
, p
);
9822 else if (reloc_type
== BFD_RELOC_64
)
9823 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9825 /* Don't do the adjustment for x86-64, as there
9826 the pcrel addressing is relative to the _next_
9827 insn, and that is taken care of in other code. */
9828 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9830 else if (align_branch_power
)
9834 case BFD_RELOC_386_TLS_GD
:
9835 case BFD_RELOC_386_TLS_LDM
:
9836 case BFD_RELOC_386_TLS_IE
:
9837 case BFD_RELOC_386_TLS_IE_32
:
9838 case BFD_RELOC_386_TLS_GOTIE
:
9839 case BFD_RELOC_386_TLS_GOTDESC
:
9840 case BFD_RELOC_386_TLS_DESC_CALL
:
9841 case BFD_RELOC_X86_64_TLSGD
:
9842 case BFD_RELOC_X86_64_TLSLD
:
9843 case BFD_RELOC_X86_64_GOTTPOFF
:
9844 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9845 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9846 i
.has_gotpc_tls_reloc
= true;
9851 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9852 size
, i
.op
[n
].disps
, pcrel
,
9854 /* Check for "call/jmp *mem", "mov mem, %reg",
9855 "test %reg, mem" and "binop mem, %reg" where binop
9856 is one of adc, add, and, cmp, or, sbb, sub, xor
9857 instructions without data prefix. Always generate
9858 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9859 if (i
.prefix
[DATA_PREFIX
] == 0
9860 && (generate_relax_relocations
9863 && i
.rm
.regmem
== 5))
9865 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9866 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
9867 && ((i
.operands
== 1
9868 && i
.tm
.base_opcode
== 0xff
9869 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9871 && (i
.tm
.base_opcode
== 0x8b
9872 || i
.tm
.base_opcode
== 0x85
9873 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9877 fixP
->fx_tcbit
= i
.rex
!= 0;
9879 && (i
.base_reg
->reg_num
== RegIP
))
9880 fixP
->fx_tcbit2
= 1;
9883 fixP
->fx_tcbit2
= 1;
9891 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9896 for (n
= 0; n
< i
.operands
; n
++)
9898 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9899 if (i
.rounding
.type
!= rc_none
&& n
== i
.rounding
.operand
)
9902 if (operand_type_check (i
.types
[n
], imm
))
9904 int size
= imm_size (n
);
9906 if (now_seg
== absolute_section
)
9907 abs_section_offset
+= size
;
9908 else if (i
.op
[n
].imms
->X_op
== O_constant
)
9912 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9914 p
= frag_more (size
);
9915 md_number_to_chars (p
, val
, size
);
9919 /* Not absolute_section.
9920 Need a 32-bit fixup (don't support 8bit
9921 non-absolute imms). Try to support other
9923 enum bfd_reloc_code_real reloc_type
;
9926 if (i
.types
[n
].bitfield
.imm32s
9927 && (i
.suffix
== QWORD_MNEM_SUFFIX
9928 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9933 p
= frag_more (size
);
9934 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9936 /* This is tough to explain. We end up with this one if we
9937 * have operands that look like
9938 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9939 * obtain the absolute address of the GOT, and it is strongly
9940 * preferable from a performance point of view to avoid using
9941 * a runtime relocation for this. The actual sequence of
9942 * instructions often look something like:
9947 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9949 * The call and pop essentially return the absolute address
9950 * of the label .L66 and store it in %ebx. The linker itself
9951 * will ultimately change the first operand of the addl so
9952 * that %ebx points to the GOT, but to keep things simple, the
9953 * .o file must have this operand set so that it generates not
9954 * the absolute address of .L66, but the absolute address of
9955 * itself. This allows the linker itself simply treat a GOTPC
9956 * relocation as asking for a pcrel offset to the GOT to be
9957 * added in, and the addend of the relocation is stored in the
9958 * operand field for the instruction itself.
9960 * Our job here is to fix the operand so that it would add
9961 * the correct offset so that %ebx would point to itself. The
9962 * thing that is tricky is that .-.L66 will point to the
9963 * beginning of the instruction, so we need to further modify
9964 * the operand so that it will point to itself. There are
9965 * other cases where you have something like:
9967 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9969 * and here no correction would be required. Internally in
9970 * the assembler we treat operands of this form as not being
9971 * pcrel since the '.' is explicitly mentioned, and I wonder
9972 * whether it would simplify matters to do it this way. Who
9973 * knows. In earlier versions of the PIC patches, the
9974 * pcrel_adjust field was used to store the correction, but
9975 * since the expression is not pcrel, I felt it would be
9976 * confusing to do it this way. */
9978 if ((reloc_type
== BFD_RELOC_32
9979 || reloc_type
== BFD_RELOC_X86_64_32S
9980 || reloc_type
== BFD_RELOC_64
)
9982 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9983 && (i
.op
[n
].imms
->X_op
== O_symbol
9984 || (i
.op
[n
].imms
->X_op
== O_add
9985 && ((symbol_get_value_expression
9986 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9990 reloc_type
= BFD_RELOC_386_GOTPC
;
9992 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9994 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9995 i
.has_gotpc_tls_reloc
= true;
9996 i
.op
[n
].imms
->X_add_number
+=
9997 encoding_length (insn_start_frag
, insn_start_off
, p
);
9999 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10000 i
.op
[n
].imms
, 0, reloc_type
);
10006 /* x86_cons_fix_new is called via the expression parsing code when a
10007 reloc is needed. We use this hook to get the correct .got reloc. */
10008 static int cons_sign
= -1;
10011 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10012 expressionS
*exp
, bfd_reloc_code_real_type r
)
10014 r
= reloc (len
, 0, cons_sign
, r
);
10017 if (exp
->X_op
== O_secrel
)
10019 exp
->X_op
= O_symbol
;
10020 r
= BFD_RELOC_32_SECREL
;
10024 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10027 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10028 purpose of the `.dc.a' internal pseudo-op. */
10031 x86_address_bytes (void)
10033 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10035 return stdoutput
->arch_info
->bits_per_address
/ 8;
10038 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10039 || defined (LEX_AT)
10040 # define lex_got(reloc, adjust, types) NULL
10042 /* Parse operands of the form
10043 <symbol>@GOTOFF+<nnn>
10044 and similar .plt or .got references.
10046 If we find one, set up the correct relocation in RELOC and copy the
10047 input string, minus the `@GOTOFF' into a malloc'd buffer for
10048 parsing by the calling routine. Return this buffer, and if ADJUST
10049 is non-null set it to the length of the string we removed from the
10050 input line. Otherwise return NULL. */
10052 lex_got (enum bfd_reloc_code_real
*rel
,
10054 i386_operand_type
*types
)
10056 /* Some of the relocations depend on the size of what field is to
10057 be relocated. But in our callers i386_immediate and i386_displacement
10058 we don't yet know the operand size (this will be set by insn
10059 matching). Hence we record the word32 relocation here,
10060 and adjust the reloc according to the real size in reloc(). */
10061 static const struct {
10064 const enum bfd_reloc_code_real rel
[2];
10065 const i386_operand_type types64
;
10066 bool need_GOT_symbol
;
10068 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10069 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10070 BFD_RELOC_SIZE32
},
10071 OPERAND_TYPE_IMM32_64
, false },
10073 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10074 BFD_RELOC_X86_64_PLTOFF64
},
10075 OPERAND_TYPE_IMM64
, true },
10076 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10077 BFD_RELOC_X86_64_PLT32
},
10078 OPERAND_TYPE_IMM32_32S_DISP32
, false },
10079 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10080 BFD_RELOC_X86_64_GOTPLT64
},
10081 OPERAND_TYPE_IMM64_DISP64
, true },
10082 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10083 BFD_RELOC_X86_64_GOTOFF64
},
10084 OPERAND_TYPE_IMM64_DISP64
, true },
10085 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10086 BFD_RELOC_X86_64_GOTPCREL
},
10087 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10088 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10089 BFD_RELOC_X86_64_TLSGD
},
10090 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10091 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10092 _dummy_first_bfd_reloc_code_real
},
10093 OPERAND_TYPE_NONE
, true },
10094 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10095 BFD_RELOC_X86_64_TLSLD
},
10096 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10097 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10098 BFD_RELOC_X86_64_GOTTPOFF
},
10099 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10100 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10101 BFD_RELOC_X86_64_TPOFF32
},
10102 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10103 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10104 _dummy_first_bfd_reloc_code_real
},
10105 OPERAND_TYPE_NONE
, true },
10106 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10107 BFD_RELOC_X86_64_DTPOFF32
},
10108 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10109 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10110 _dummy_first_bfd_reloc_code_real
},
10111 OPERAND_TYPE_NONE
, true },
10112 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10113 _dummy_first_bfd_reloc_code_real
},
10114 OPERAND_TYPE_NONE
, true },
10115 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10116 BFD_RELOC_X86_64_GOT32
},
10117 OPERAND_TYPE_IMM32_32S_64_DISP32
, true },
10118 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10119 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10120 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10121 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10122 BFD_RELOC_X86_64_TLSDESC_CALL
},
10123 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10128 #if defined (OBJ_MAYBE_ELF)
10133 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10134 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10137 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10139 int len
= gotrel
[j
].len
;
10140 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10142 if (gotrel
[j
].rel
[object_64bit
] != 0)
10145 char *tmpbuf
, *past_reloc
;
10147 *rel
= gotrel
[j
].rel
[object_64bit
];
10151 if (flag_code
!= CODE_64BIT
)
10153 types
->bitfield
.imm32
= 1;
10154 types
->bitfield
.disp32
= 1;
10157 *types
= gotrel
[j
].types64
;
10160 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10161 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10163 /* The length of the first part of our input line. */
10164 first
= cp
- input_line_pointer
;
10166 /* The second part goes from after the reloc token until
10167 (and including) an end_of_line char or comma. */
10168 past_reloc
= cp
+ 1 + len
;
10170 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10172 second
= cp
+ 1 - past_reloc
;
10174 /* Allocate and copy string. The trailing NUL shouldn't
10175 be necessary, but be safe. */
10176 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10177 memcpy (tmpbuf
, input_line_pointer
, first
);
10178 if (second
!= 0 && *past_reloc
!= ' ')
10179 /* Replace the relocation token with ' ', so that
10180 errors like foo@GOTOFF1 will be detected. */
10181 tmpbuf
[first
++] = ' ';
10183 /* Increment length by 1 if the relocation token is
10188 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10189 tmpbuf
[first
+ second
] = '\0';
10193 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10194 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10199 /* Might be a symbol version string. Don't as_bad here. */
10208 /* Parse operands of the form
10209 <symbol>@SECREL32+<nnn>
10211 If we find one, set up the correct relocation in RELOC and copy the
10212 input string, minus the `@SECREL32' into a malloc'd buffer for
10213 parsing by the calling routine. Return this buffer, and if ADJUST
10214 is non-null set it to the length of the string we removed from the
10215 input line. Otherwise return NULL.
10217 This function is copied from the ELF version above adjusted for PE targets. */
10220 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
10221 int *adjust ATTRIBUTE_UNUSED
,
10222 i386_operand_type
*types
)
10224 static const struct
10228 const enum bfd_reloc_code_real rel
[2];
10229 const i386_operand_type types64
;
10233 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10234 BFD_RELOC_32_SECREL
},
10235 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
10241 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10242 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10245 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10247 int len
= gotrel
[j
].len
;
10249 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10251 if (gotrel
[j
].rel
[object_64bit
] != 0)
10254 char *tmpbuf
, *past_reloc
;
10256 *rel
= gotrel
[j
].rel
[object_64bit
];
10262 if (flag_code
!= CODE_64BIT
)
10264 types
->bitfield
.imm32
= 1;
10265 types
->bitfield
.disp32
= 1;
10268 *types
= gotrel
[j
].types64
;
10271 /* The length of the first part of our input line. */
10272 first
= cp
- input_line_pointer
;
10274 /* The second part goes from after the reloc token until
10275 (and including) an end_of_line char or comma. */
10276 past_reloc
= cp
+ 1 + len
;
10278 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10280 second
= cp
+ 1 - past_reloc
;
10282 /* Allocate and copy string. The trailing NUL shouldn't
10283 be necessary, but be safe. */
10284 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10285 memcpy (tmpbuf
, input_line_pointer
, first
);
10286 if (second
!= 0 && *past_reloc
!= ' ')
10287 /* Replace the relocation token with ' ', so that
10288 errors like foo@SECLREL321 will be detected. */
10289 tmpbuf
[first
++] = ' ';
10290 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10291 tmpbuf
[first
+ second
] = '\0';
10295 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10296 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10301 /* Might be a symbol version string. Don't as_bad here. */
10307 bfd_reloc_code_real_type
10308 x86_cons (expressionS
*exp
, int size
)
10310 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10312 intel_syntax
= -intel_syntax
;
10315 if (size
== 4 || (object_64bit
&& size
== 8))
10317 /* Handle @GOTOFF and the like in an expression. */
10319 char *gotfree_input_line
;
10322 save
= input_line_pointer
;
10323 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10324 if (gotfree_input_line
)
10325 input_line_pointer
= gotfree_input_line
;
10329 if (gotfree_input_line
)
10331 /* expression () has merrily parsed up to the end of line,
10332 or a comma - in the wrong buffer. Transfer how far
10333 input_line_pointer has moved to the right buffer. */
10334 input_line_pointer
= (save
10335 + (input_line_pointer
- gotfree_input_line
)
10337 free (gotfree_input_line
);
10338 if (exp
->X_op
== O_constant
10339 || exp
->X_op
== O_absent
10340 || exp
->X_op
== O_illegal
10341 || exp
->X_op
== O_register
10342 || exp
->X_op
== O_big
)
10344 char c
= *input_line_pointer
;
10345 *input_line_pointer
= 0;
10346 as_bad (_("missing or invalid expression `%s'"), save
);
10347 *input_line_pointer
= c
;
10349 else if ((got_reloc
== BFD_RELOC_386_PLT32
10350 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10351 && exp
->X_op
!= O_symbol
)
10353 char c
= *input_line_pointer
;
10354 *input_line_pointer
= 0;
10355 as_bad (_("invalid PLT expression `%s'"), save
);
10356 *input_line_pointer
= c
;
10363 intel_syntax
= -intel_syntax
;
10366 i386_intel_simplify (exp
);
10372 signed_cons (int size
)
10374 if (flag_code
== CODE_64BIT
)
10382 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10389 if (exp
.X_op
== O_symbol
)
10390 exp
.X_op
= O_secrel
;
10392 emit_expr (&exp
, 4);
10394 while (*input_line_pointer
++ == ',');
10396 input_line_pointer
--;
10397 demand_empty_rest_of_line ();
10401 /* Handle Vector operations. */
10404 check_VecOperations (char *op_string
, char *op_end
)
10406 const reg_entry
*mask
;
10411 && (op_end
== NULL
|| op_string
< op_end
))
10414 if (*op_string
== '{')
10418 /* Check broadcasts. */
10419 if (strncmp (op_string
, "1to", 3) == 0)
10421 unsigned int bcst_type
;
10423 if (i
.broadcast
.type
)
10424 goto duplicated_vec_op
;
10427 if (*op_string
== '8')
10429 else if (*op_string
== '4')
10431 else if (*op_string
== '2')
10433 else if (*op_string
== '1'
10434 && *(op_string
+1) == '6')
10441 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10446 i
.broadcast
.type
= bcst_type
;
10447 i
.broadcast
.operand
= this_operand
;
10449 /* Check masking operation. */
10450 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10452 if (mask
== &bad_reg
)
10455 /* k0 can't be used for write mask. */
10456 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10458 as_bad (_("`%s%s' can't be used for write mask"),
10459 register_prefix
, mask
->reg_name
);
10466 i
.mask
.operand
= this_operand
;
10468 else if (i
.mask
.reg
->reg_num
)
10469 goto duplicated_vec_op
;
10474 /* Only "{z}" is allowed here. No need to check
10475 zeroing mask explicitly. */
10476 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10478 as_bad (_("invalid write mask `%s'"), saved
);
10483 op_string
= end_op
;
10485 /* Check zeroing-flag for masking operation. */
10486 else if (*op_string
== 'z')
10490 i
.mask
.reg
= reg_k0
;
10491 i
.mask
.zeroing
= 1;
10492 i
.mask
.operand
= this_operand
;
10496 if (i
.mask
.zeroing
)
10499 as_bad (_("duplicated `%s'"), saved
);
10503 i
.mask
.zeroing
= 1;
10505 /* Only "{%k}" is allowed here. No need to check mask
10506 register explicitly. */
10507 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10509 as_bad (_("invalid zeroing-masking `%s'"),
10518 goto unknown_vec_op
;
10520 if (*op_string
!= '}')
10522 as_bad (_("missing `}' in `%s'"), saved
);
10527 /* Strip whitespace since the addition of pseudo prefixes
10528 changed how the scrubber treats '{'. */
10529 if (is_space_char (*op_string
))
10535 /* We don't know this one. */
10536 as_bad (_("unknown vector operation: `%s'"), saved
);
10540 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10542 as_bad (_("zeroing-masking only allowed with write mask"));
10550 i386_immediate (char *imm_start
)
10552 char *save_input_line_pointer
;
10553 char *gotfree_input_line
;
10556 i386_operand_type types
;
10558 operand_type_set (&types
, ~0);
10560 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10562 as_bad (_("at most %d immediate operands are allowed"),
10563 MAX_IMMEDIATE_OPERANDS
);
10567 exp
= &im_expressions
[i
.imm_operands
++];
10568 i
.op
[this_operand
].imms
= exp
;
10570 if (is_space_char (*imm_start
))
10573 save_input_line_pointer
= input_line_pointer
;
10574 input_line_pointer
= imm_start
;
10576 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10577 if (gotfree_input_line
)
10578 input_line_pointer
= gotfree_input_line
;
10580 exp_seg
= expression (exp
);
10582 SKIP_WHITESPACE ();
10584 /* Handle vector operations. */
10585 if (*input_line_pointer
== '{')
10587 input_line_pointer
= check_VecOperations (input_line_pointer
,
10589 if (input_line_pointer
== NULL
)
10593 if (*input_line_pointer
)
10594 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10596 input_line_pointer
= save_input_line_pointer
;
10597 if (gotfree_input_line
)
10599 free (gotfree_input_line
);
10601 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10602 exp
->X_op
= O_illegal
;
10605 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10609 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10610 i386_operand_type types
, const char *imm_start
)
10612 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10615 as_bad (_("missing or invalid immediate expression `%s'"),
10619 else if (exp
->X_op
== O_constant
)
10621 /* Size it properly later. */
10622 i
.types
[this_operand
].bitfield
.imm64
= 1;
10623 /* If not 64bit, sign extend val. */
10624 if (flag_code
!= CODE_64BIT
10625 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
10627 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
10629 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10630 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10631 && exp_seg
!= absolute_section
10632 && exp_seg
!= text_section
10633 && exp_seg
!= data_section
10634 && exp_seg
!= bss_section
10635 && exp_seg
!= undefined_section
10636 && !bfd_is_com_section (exp_seg
))
10638 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10642 else if (!intel_syntax
&& exp_seg
== reg_section
)
10645 as_bad (_("illegal immediate register operand %s"), imm_start
);
10650 /* This is an address. The size of the address will be
10651 determined later, depending on destination register,
10652 suffix, or the default for the section. */
10653 i
.types
[this_operand
].bitfield
.imm8
= 1;
10654 i
.types
[this_operand
].bitfield
.imm16
= 1;
10655 i
.types
[this_operand
].bitfield
.imm32
= 1;
10656 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10657 i
.types
[this_operand
].bitfield
.imm64
= 1;
10658 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10666 i386_scale (char *scale
)
10669 char *save
= input_line_pointer
;
10671 input_line_pointer
= scale
;
10672 val
= get_absolute_expression ();
10677 i
.log2_scale_factor
= 0;
10680 i
.log2_scale_factor
= 1;
10683 i
.log2_scale_factor
= 2;
10686 i
.log2_scale_factor
= 3;
10690 char sep
= *input_line_pointer
;
10692 *input_line_pointer
= '\0';
10693 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10695 *input_line_pointer
= sep
;
10696 input_line_pointer
= save
;
10700 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10702 as_warn (_("scale factor of %d without an index register"),
10703 1 << i
.log2_scale_factor
);
10704 i
.log2_scale_factor
= 0;
10706 scale
= input_line_pointer
;
10707 input_line_pointer
= save
;
10712 i386_displacement (char *disp_start
, char *disp_end
)
10716 char *save_input_line_pointer
;
10717 char *gotfree_input_line
;
10719 i386_operand_type bigdisp
, types
= anydisp
;
10722 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10724 as_bad (_("at most %d displacement operands are allowed"),
10725 MAX_MEMORY_OPERANDS
);
10729 operand_type_set (&bigdisp
, 0);
10731 || i
.types
[this_operand
].bitfield
.baseindex
10732 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10733 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10735 i386_addressing_mode ();
10736 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10737 if (flag_code
== CODE_64BIT
)
10741 bigdisp
.bitfield
.disp32s
= 1;
10742 bigdisp
.bitfield
.disp64
= 1;
10745 bigdisp
.bitfield
.disp32
= 1;
10747 else if ((flag_code
== CODE_16BIT
) ^ override
)
10748 bigdisp
.bitfield
.disp16
= 1;
10750 bigdisp
.bitfield
.disp32
= 1;
10754 /* For PC-relative branches, the width of the displacement may be
10755 dependent upon data size, but is never dependent upon address size.
10756 Also make sure to not unintentionally match against a non-PC-relative
10757 branch template. */
10758 static templates aux_templates
;
10759 const insn_template
*t
= current_templates
->start
;
10760 bool has_intel64
= false;
10762 aux_templates
.start
= t
;
10763 while (++t
< current_templates
->end
)
10765 if (t
->opcode_modifier
.jump
10766 != current_templates
->start
->opcode_modifier
.jump
)
10768 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10769 has_intel64
= true;
10771 if (t
< current_templates
->end
)
10773 aux_templates
.end
= t
;
10774 current_templates
= &aux_templates
;
10777 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10778 if (flag_code
== CODE_64BIT
)
10780 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10781 && (!intel64
|| !has_intel64
))
10782 bigdisp
.bitfield
.disp16
= 1;
10784 bigdisp
.bitfield
.disp32s
= 1;
10789 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10791 : LONG_MNEM_SUFFIX
));
10792 bigdisp
.bitfield
.disp32
= 1;
10793 if ((flag_code
== CODE_16BIT
) ^ override
)
10795 bigdisp
.bitfield
.disp32
= 0;
10796 bigdisp
.bitfield
.disp16
= 1;
10800 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10803 exp
= &disp_expressions
[i
.disp_operands
];
10804 i
.op
[this_operand
].disps
= exp
;
10806 save_input_line_pointer
= input_line_pointer
;
10807 input_line_pointer
= disp_start
;
10808 END_STRING_AND_SAVE (disp_end
);
10810 #ifndef GCC_ASM_O_HACK
10811 #define GCC_ASM_O_HACK 0
10814 END_STRING_AND_SAVE (disp_end
+ 1);
10815 if (i
.types
[this_operand
].bitfield
.baseIndex
10816 && displacement_string_end
[-1] == '+')
10818 /* This hack is to avoid a warning when using the "o"
10819 constraint within gcc asm statements.
10822 #define _set_tssldt_desc(n,addr,limit,type) \
10823 __asm__ __volatile__ ( \
10824 "movw %w2,%0\n\t" \
10825 "movw %w1,2+%0\n\t" \
10826 "rorl $16,%1\n\t" \
10827 "movb %b1,4+%0\n\t" \
10828 "movb %4,5+%0\n\t" \
10829 "movb $0,6+%0\n\t" \
10830 "movb %h1,7+%0\n\t" \
10832 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10834 This works great except that the output assembler ends
10835 up looking a bit weird if it turns out that there is
10836 no offset. You end up producing code that looks like:
10849 So here we provide the missing zero. */
10851 *displacement_string_end
= '0';
10854 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10855 if (gotfree_input_line
)
10856 input_line_pointer
= gotfree_input_line
;
10858 exp_seg
= expression (exp
);
10860 SKIP_WHITESPACE ();
10861 if (*input_line_pointer
)
10862 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10864 RESTORE_END_STRING (disp_end
+ 1);
10866 input_line_pointer
= save_input_line_pointer
;
10867 if (gotfree_input_line
)
10869 free (gotfree_input_line
);
10871 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10872 exp
->X_op
= O_illegal
;
10875 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10877 RESTORE_END_STRING (disp_end
);
10883 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10884 i386_operand_type types
, const char *disp_start
)
10886 i386_operand_type bigdisp
;
10889 /* We do this to make sure that the section symbol is in
10890 the symbol table. We will ultimately change the relocation
10891 to be relative to the beginning of the section. */
10892 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10893 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10894 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10896 if (exp
->X_op
!= O_symbol
)
10899 if (S_IS_LOCAL (exp
->X_add_symbol
)
10900 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10901 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10902 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10903 exp
->X_op
= O_subtract
;
10904 exp
->X_op_symbol
= GOT_symbol
;
10905 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10906 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10907 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10908 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10910 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10913 else if (exp
->X_op
== O_absent
10914 || exp
->X_op
== O_illegal
10915 || exp
->X_op
== O_big
)
10918 as_bad (_("missing or invalid displacement expression `%s'"),
10923 else if (flag_code
== CODE_64BIT
10924 && !i
.prefix
[ADDR_PREFIX
]
10925 && exp
->X_op
== O_constant
)
10927 /* Since displacement is signed extended to 64bit, don't allow
10928 disp32 and turn off disp32s if they are out of range. */
10929 i
.types
[this_operand
].bitfield
.disp32
= 0;
10930 if (!fits_in_signed_long (exp
->X_add_number
))
10932 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10933 if (i
.types
[this_operand
].bitfield
.baseindex
)
10935 as_bad (_("0x%lx out range of signed 32bit displacement"),
10936 (long) exp
->X_add_number
);
10942 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10943 else if (exp
->X_op
!= O_constant
10944 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10945 && exp_seg
!= absolute_section
10946 && exp_seg
!= text_section
10947 && exp_seg
!= data_section
10948 && exp_seg
!= bss_section
10949 && exp_seg
!= undefined_section
10950 && !bfd_is_com_section (exp_seg
))
10952 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10957 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10958 /* Constants get taken care of by optimize_disp(). */
10959 && exp
->X_op
!= O_constant
)
10960 i
.types
[this_operand
].bitfield
.disp8
= 1;
10962 /* Check if this is a displacement only operand. */
10963 bigdisp
= i
.types
[this_operand
];
10964 bigdisp
.bitfield
.disp8
= 0;
10965 bigdisp
.bitfield
.disp16
= 0;
10966 bigdisp
.bitfield
.disp32
= 0;
10967 bigdisp
.bitfield
.disp32s
= 0;
10968 bigdisp
.bitfield
.disp64
= 0;
10969 if (operand_type_all_zero (&bigdisp
))
10970 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10976 /* Return the active addressing mode, taking address override and
10977 registers forming the address into consideration. Update the
10978 address override prefix if necessary. */
10980 static enum flag_code
10981 i386_addressing_mode (void)
10983 enum flag_code addr_mode
;
10985 if (i
.prefix
[ADDR_PREFIX
])
10986 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10987 else if (flag_code
== CODE_16BIT
10988 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
10989 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10990 from md_assemble() by "is not a valid base/index expression"
10991 when there is a base and/or index. */
10992 && !i
.types
[this_operand
].bitfield
.baseindex
)
10994 /* MPX insn memory operands with neither base nor index must be forced
10995 to use 32-bit addressing in 16-bit mode. */
10996 addr_mode
= CODE_32BIT
;
10997 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10999 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11000 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11004 addr_mode
= flag_code
;
11006 #if INFER_ADDR_PREFIX
11007 if (i
.mem_operands
== 0)
11009 /* Infer address prefix from the first memory operand. */
11010 const reg_entry
*addr_reg
= i
.base_reg
;
11012 if (addr_reg
== NULL
)
11013 addr_reg
= i
.index_reg
;
11017 if (addr_reg
->reg_type
.bitfield
.dword
)
11018 addr_mode
= CODE_32BIT
;
11019 else if (flag_code
!= CODE_64BIT
11020 && addr_reg
->reg_type
.bitfield
.word
)
11021 addr_mode
= CODE_16BIT
;
11023 if (addr_mode
!= flag_code
)
11025 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11027 /* Change the size of any displacement too. At most one
11028 of Disp16 or Disp32 is set.
11029 FIXME. There doesn't seem to be any real need for
11030 separate Disp16 and Disp32 flags. The same goes for
11031 Imm16 and Imm32. Removing them would probably clean
11032 up the code quite a lot. */
11033 if (flag_code
!= CODE_64BIT
11034 && (i
.types
[this_operand
].bitfield
.disp16
11035 || i
.types
[this_operand
].bitfield
.disp32
))
11036 i
.types
[this_operand
]
11037 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11047 /* Make sure the memory operand we've been dealt is valid.
11048 Return 1 on success, 0 on a failure. */
11051 i386_index_check (const char *operand_string
)
11053 const char *kind
= "base/index";
11054 enum flag_code addr_mode
= i386_addressing_mode ();
11055 const insn_template
*t
= current_templates
->start
;
11057 if (t
->opcode_modifier
.isstring
11058 && !t
->cpu_flags
.bitfield
.cpupadlock
11059 && (current_templates
->end
[-1].opcode_modifier
.isstring
11060 || i
.mem_operands
))
11062 /* Memory operands of string insns are special in that they only allow
11063 a single register (rDI, rSI, or rBX) as their memory address. */
11064 const reg_entry
*expected_reg
;
11065 static const char *di_si
[][2] =
11071 static const char *bx
[] = { "ebx", "bx", "rbx" };
11073 kind
= "string address";
11075 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11077 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11078 - IS_STRING_ES_OP0
;
11081 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11082 || ((!i
.mem_operands
!= !intel_syntax
)
11083 && current_templates
->end
[-1].operand_types
[1]
11084 .bitfield
.baseindex
))
11087 = (const reg_entry
*) str_hash_find (reg_hash
,
11088 di_si
[addr_mode
][op
== es_op
]);
11092 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11094 if (i
.base_reg
!= expected_reg
11096 || operand_type_check (i
.types
[this_operand
], disp
))
11098 /* The second memory operand must have the same size as
11102 && !((addr_mode
== CODE_64BIT
11103 && i
.base_reg
->reg_type
.bitfield
.qword
)
11104 || (addr_mode
== CODE_32BIT
11105 ? i
.base_reg
->reg_type
.bitfield
.dword
11106 : i
.base_reg
->reg_type
.bitfield
.word
)))
11109 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11111 intel_syntax
? '[' : '(',
11113 expected_reg
->reg_name
,
11114 intel_syntax
? ']' : ')');
11121 as_bad (_("`%s' is not a valid %s expression"),
11122 operand_string
, kind
);
11127 if (addr_mode
!= CODE_16BIT
)
11129 /* 32-bit/64-bit checks. */
11130 if (i
.disp_encoding
== disp_encoding_16bit
)
11133 as_bad (_("invalid `%s' prefix"),
11134 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11139 && ((addr_mode
== CODE_64BIT
11140 ? !i
.base_reg
->reg_type
.bitfield
.qword
11141 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11142 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11143 || i
.base_reg
->reg_num
== RegIZ
))
11145 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11146 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11147 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11148 && ((addr_mode
== CODE_64BIT
11149 ? !i
.index_reg
->reg_type
.bitfield
.qword
11150 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11151 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11154 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11155 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11156 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11157 && t
->base_opcode
== 0x1b)
11158 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11159 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11160 && (t
->base_opcode
& ~1) == 0x1a)
11161 || t
->opcode_modifier
.sib
== SIBMEM
)
11163 /* They cannot use RIP-relative addressing. */
11164 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11166 as_bad (_("`%s' cannot be used here"), operand_string
);
11170 /* bndldx and bndstx ignore their scale factor. */
11171 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11172 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11173 && (t
->base_opcode
& ~1) == 0x1a
11174 && i
.log2_scale_factor
)
11175 as_warn (_("register scaling is being ignored here"));
11180 /* 16-bit checks. */
11181 if (i
.disp_encoding
== disp_encoding_32bit
)
11185 && (!i
.base_reg
->reg_type
.bitfield
.word
11186 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11188 && (!i
.index_reg
->reg_type
.bitfield
.word
11189 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11191 && i
.base_reg
->reg_num
< 6
11192 && i
.index_reg
->reg_num
>= 6
11193 && i
.log2_scale_factor
== 0))))
11200 /* Handle vector immediates. */
11203 RC_SAE_immediate (const char *imm_start
)
11205 unsigned int match_found
, j
;
11206 const char *pstr
= imm_start
;
11214 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11216 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11218 if (i
.rounding
.type
!= rc_none
)
11220 as_bad (_("duplicated `%s'"), imm_start
);
11224 i
.rounding
.type
= RC_NamesTable
[j
].type
;
11225 i
.rounding
.operand
= this_operand
;
11227 pstr
+= RC_NamesTable
[j
].len
;
11235 if (*pstr
++ != '}')
11237 as_bad (_("Missing '}': '%s'"), imm_start
);
11240 /* RC/SAE immediate string should contain nothing more. */;
11243 as_bad (_("Junk after '}': '%s'"), imm_start
);
11247 exp
= &im_expressions
[i
.imm_operands
++];
11248 i
.op
[this_operand
].imms
= exp
;
11250 exp
->X_op
= O_constant
;
11251 exp
->X_add_number
= 0;
11252 exp
->X_add_symbol
= (symbolS
*) 0;
11253 exp
->X_op_symbol
= (symbolS
*) 0;
11255 i
.types
[this_operand
].bitfield
.imm8
= 1;
11259 /* Only string instructions can have a second memory operand, so
11260 reduce current_templates to just those if it contains any. */
11262 maybe_adjust_templates (void)
11264 const insn_template
*t
;
11266 gas_assert (i
.mem_operands
== 1);
11268 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11269 if (t
->opcode_modifier
.isstring
)
11272 if (t
< current_templates
->end
)
11274 static templates aux_templates
;
11277 aux_templates
.start
= t
;
11278 for (; t
< current_templates
->end
; ++t
)
11279 if (!t
->opcode_modifier
.isstring
)
11281 aux_templates
.end
= t
;
11283 /* Determine whether to re-check the first memory operand. */
11284 recheck
= (aux_templates
.start
!= current_templates
->start
11285 || t
!= current_templates
->end
);
11287 current_templates
= &aux_templates
;
11291 i
.mem_operands
= 0;
11292 if (i
.memop1_string
!= NULL
11293 && i386_index_check (i
.memop1_string
) == 0)
11295 i
.mem_operands
= 1;
11302 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11306 i386_att_operand (char *operand_string
)
11308 const reg_entry
*r
;
11310 char *op_string
= operand_string
;
11312 if (is_space_char (*op_string
))
11315 /* We check for an absolute prefix (differentiating,
11316 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11317 if (*op_string
== ABSOLUTE_PREFIX
)
11320 if (is_space_char (*op_string
))
11322 i
.jumpabsolute
= true;
11325 /* Check if operand is a register. */
11326 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11328 i386_operand_type temp
;
11333 /* Check for a segment override by searching for ':' after a
11334 segment register. */
11335 op_string
= end_op
;
11336 if (is_space_char (*op_string
))
11338 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11340 i
.seg
[i
.mem_operands
] = r
;
11342 /* Skip the ':' and whitespace. */
11344 if (is_space_char (*op_string
))
11347 if (!is_digit_char (*op_string
)
11348 && !is_identifier_char (*op_string
)
11349 && *op_string
!= '('
11350 && *op_string
!= ABSOLUTE_PREFIX
)
11352 as_bad (_("bad memory operand `%s'"), op_string
);
11355 /* Handle case of %es:*foo. */
11356 if (*op_string
== ABSOLUTE_PREFIX
)
11359 if (is_space_char (*op_string
))
11361 i
.jumpabsolute
= true;
11363 goto do_memory_reference
;
11366 /* Handle vector operations. */
11367 if (*op_string
== '{')
11369 op_string
= check_VecOperations (op_string
, NULL
);
11370 if (op_string
== NULL
)
11376 as_bad (_("junk `%s' after register"), op_string
);
11379 temp
= r
->reg_type
;
11380 temp
.bitfield
.baseindex
= 0;
11381 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11383 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11384 i
.op
[this_operand
].regs
= r
;
11387 else if (*op_string
== REGISTER_PREFIX
)
11389 as_bad (_("bad register name `%s'"), op_string
);
11392 else if (*op_string
== IMMEDIATE_PREFIX
)
11395 if (i
.jumpabsolute
)
11397 as_bad (_("immediate operand illegal with absolute jump"));
11400 if (!i386_immediate (op_string
))
11403 else if (RC_SAE_immediate (operand_string
))
11405 /* If it is a RC or SAE immediate, do nothing. */
11408 else if (is_digit_char (*op_string
)
11409 || is_identifier_char (*op_string
)
11410 || *op_string
== '"'
11411 || *op_string
== '(')
11413 /* This is a memory reference of some sort. */
11416 /* Start and end of displacement string expression (if found). */
11417 char *displacement_string_start
;
11418 char *displacement_string_end
;
11421 do_memory_reference
:
11422 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11424 if ((i
.mem_operands
== 1
11425 && !current_templates
->start
->opcode_modifier
.isstring
)
11426 || i
.mem_operands
== 2)
11428 as_bad (_("too many memory references for `%s'"),
11429 current_templates
->start
->name
);
11433 /* Check for base index form. We detect the base index form by
11434 looking for an ')' at the end of the operand, searching
11435 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11437 base_string
= op_string
+ strlen (op_string
);
11439 /* Handle vector operations. */
11440 vop_start
= strchr (op_string
, '{');
11441 if (vop_start
&& vop_start
< base_string
)
11443 if (check_VecOperations (vop_start
, base_string
) == NULL
)
11445 base_string
= vop_start
;
11449 if (is_space_char (*base_string
))
11452 /* If we only have a displacement, set-up for it to be parsed later. */
11453 displacement_string_start
= op_string
;
11454 displacement_string_end
= base_string
+ 1;
11456 if (*base_string
== ')')
11459 unsigned int parens_balanced
= 1;
11460 /* We've already checked that the number of left & right ()'s are
11461 equal, so this loop will not be infinite. */
11465 if (*base_string
== ')')
11467 if (*base_string
== '(')
11470 while (parens_balanced
);
11472 temp_string
= base_string
;
11474 /* Skip past '(' and whitespace. */
11476 if (is_space_char (*base_string
))
11479 if (*base_string
== ','
11480 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11483 displacement_string_end
= temp_string
;
11485 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11489 if (i
.base_reg
== &bad_reg
)
11491 base_string
= end_op
;
11492 if (is_space_char (*base_string
))
11496 /* There may be an index reg or scale factor here. */
11497 if (*base_string
== ',')
11500 if (is_space_char (*base_string
))
11503 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11506 if (i
.index_reg
== &bad_reg
)
11508 base_string
= end_op
;
11509 if (is_space_char (*base_string
))
11511 if (*base_string
== ',')
11514 if (is_space_char (*base_string
))
11517 else if (*base_string
!= ')')
11519 as_bad (_("expecting `,' or `)' "
11520 "after index register in `%s'"),
11525 else if (*base_string
== REGISTER_PREFIX
)
11527 end_op
= strchr (base_string
, ',');
11530 as_bad (_("bad register name `%s'"), base_string
);
11534 /* Check for scale factor. */
11535 if (*base_string
!= ')')
11537 char *end_scale
= i386_scale (base_string
);
11542 base_string
= end_scale
;
11543 if (is_space_char (*base_string
))
11545 if (*base_string
!= ')')
11547 as_bad (_("expecting `)' "
11548 "after scale factor in `%s'"),
11553 else if (!i
.index_reg
)
11555 as_bad (_("expecting index register or scale factor "
11556 "after `,'; got '%c'"),
11561 else if (*base_string
!= ')')
11563 as_bad (_("expecting `,' or `)' "
11564 "after base register in `%s'"),
11569 else if (*base_string
== REGISTER_PREFIX
)
11571 end_op
= strchr (base_string
, ',');
11574 as_bad (_("bad register name `%s'"), base_string
);
11579 /* If there's an expression beginning the operand, parse it,
11580 assuming displacement_string_start and
11581 displacement_string_end are meaningful. */
11582 if (displacement_string_start
!= displacement_string_end
)
11584 if (!i386_displacement (displacement_string_start
,
11585 displacement_string_end
))
11589 /* Special case for (%dx) while doing input/output op. */
11591 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11592 && i
.base_reg
->reg_type
.bitfield
.word
11593 && i
.index_reg
== 0
11594 && i
.log2_scale_factor
== 0
11595 && i
.seg
[i
.mem_operands
] == 0
11596 && !operand_type_check (i
.types
[this_operand
], disp
))
11598 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11602 if (i386_index_check (operand_string
) == 0)
11604 i
.flags
[this_operand
] |= Operand_Mem
;
11605 if (i
.mem_operands
== 0)
11606 i
.memop1_string
= xstrdup (operand_string
);
11611 /* It's not a memory operand; argh! */
11612 as_bad (_("invalid char %s beginning operand %d `%s'"),
11613 output_invalid (*op_string
),
11618 return 1; /* Normal return. */
11621 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11622 that an rs_machine_dependent frag may reach. */
11625 i386_frag_max_var (fragS
*frag
)
11627 /* The only relaxable frags are for jumps.
11628 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11629 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11630 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11633 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11635 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11637 /* STT_GNU_IFUNC symbol must go through PLT. */
11638 if ((symbol_get_bfdsym (fr_symbol
)->flags
11639 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11642 if (!S_IS_EXTERNAL (fr_symbol
))
11643 /* Symbol may be weak or local. */
11644 return !S_IS_WEAK (fr_symbol
);
11646 /* Global symbols with non-default visibility can't be preempted. */
11647 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11650 if (fr_var
!= NO_RELOC
)
11651 switch ((enum bfd_reloc_code_real
) fr_var
)
11653 case BFD_RELOC_386_PLT32
:
11654 case BFD_RELOC_X86_64_PLT32
:
11655 /* Symbol with PLT relocation may be preempted. */
11661 /* Global symbols with default visibility in a shared library may be
11662 preempted by another definition. */
11667 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11668 Note also work for Skylake and Cascadelake.
11669 ---------------------------------------------------------------------
11670 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11671 | ------ | ----------- | ------- | -------- |
11673 | Jno | N | N | Y |
11674 | Jc/Jb | Y | N | Y |
11675 | Jae/Jnb | Y | N | Y |
11676 | Je/Jz | Y | Y | Y |
11677 | Jne/Jnz | Y | Y | Y |
11678 | Jna/Jbe | Y | N | Y |
11679 | Ja/Jnbe | Y | N | Y |
11681 | Jns | N | N | Y |
11682 | Jp/Jpe | N | N | Y |
11683 | Jnp/Jpo | N | N | Y |
11684 | Jl/Jnge | Y | Y | Y |
11685 | Jge/Jnl | Y | Y | Y |
11686 | Jle/Jng | Y | Y | Y |
11687 | Jg/Jnle | Y | Y | Y |
11688 --------------------------------------------------------------------- */
11690 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11692 if (mf_cmp
== mf_cmp_alu_cmp
)
11693 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11694 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11695 if (mf_cmp
== mf_cmp_incdec
)
11696 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11697 || mf_jcc
== mf_jcc_jle
);
11698 if (mf_cmp
== mf_cmp_test_and
)
11703 /* Return the next non-empty frag. */
11706 i386_next_non_empty_frag (fragS
*fragP
)
11708 /* There may be a frag with a ".fill 0" when there is no room in
11709 the current frag for frag_grow in output_insn. */
11710 for (fragP
= fragP
->fr_next
;
11712 && fragP
->fr_type
== rs_fill
11713 && fragP
->fr_fix
== 0);
11714 fragP
= fragP
->fr_next
)
11719 /* Return the next jcc frag after BRANCH_PADDING. */
11722 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11724 fragS
*branch_fragP
;
11728 if (pad_fragP
->fr_type
== rs_machine_dependent
11729 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11730 == BRANCH_PADDING
))
11732 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11733 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11735 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11736 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11737 pad_fragP
->tc_frag_data
.mf_type
))
11738 return branch_fragP
;
11744 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11747 i386_classify_machine_dependent_frag (fragS
*fragP
)
11751 fragS
*branch_fragP
;
11753 unsigned int max_prefix_length
;
11755 if (fragP
->tc_frag_data
.classified
)
11758 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11759 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11760 for (next_fragP
= fragP
;
11761 next_fragP
!= NULL
;
11762 next_fragP
= next_fragP
->fr_next
)
11764 next_fragP
->tc_frag_data
.classified
= 1;
11765 if (next_fragP
->fr_type
== rs_machine_dependent
)
11766 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11768 case BRANCH_PADDING
:
11769 /* The BRANCH_PADDING frag must be followed by a branch
11771 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11772 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11774 case FUSED_JCC_PADDING
:
11775 /* Check if this is a fused jcc:
11777 CMP like instruction
11781 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11782 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11783 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11786 /* The BRANCH_PADDING frag is merged with the
11787 FUSED_JCC_PADDING frag. */
11788 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11789 /* CMP like instruction size. */
11790 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11791 frag_wane (pad_fragP
);
11792 /* Skip to branch_fragP. */
11793 next_fragP
= branch_fragP
;
11795 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11797 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11799 next_fragP
->fr_subtype
11800 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11801 next_fragP
->tc_frag_data
.max_bytes
11802 = next_fragP
->tc_frag_data
.max_prefix_length
;
11803 /* This will be updated in the BRANCH_PREFIX scan. */
11804 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11807 frag_wane (next_fragP
);
11812 /* Stop if there is no BRANCH_PREFIX. */
11813 if (!align_branch_prefix_size
)
11816 /* Scan for BRANCH_PREFIX. */
11817 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11819 if (fragP
->fr_type
!= rs_machine_dependent
11820 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11824 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11825 COND_JUMP_PREFIX. */
11826 max_prefix_length
= 0;
11827 for (next_fragP
= fragP
;
11828 next_fragP
!= NULL
;
11829 next_fragP
= next_fragP
->fr_next
)
11831 if (next_fragP
->fr_type
== rs_fill
)
11832 /* Skip rs_fill frags. */
11834 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11835 /* Stop for all other frags. */
11838 /* rs_machine_dependent frags. */
11839 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11842 /* Count BRANCH_PREFIX frags. */
11843 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11845 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11846 frag_wane (next_fragP
);
11850 += next_fragP
->tc_frag_data
.max_bytes
;
11852 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11854 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11855 == FUSED_JCC_PADDING
))
11857 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11858 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11862 /* Stop for other rs_machine_dependent frags. */
11866 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11868 /* Skip to the next frag. */
11869 fragP
= next_fragP
;
11873 /* Compute padding size for
11876 CMP like instruction
11878 COND_JUMP/UNCOND_JUMP
11883 COND_JUMP/UNCOND_JUMP
11887 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11889 unsigned int offset
, size
, padding_size
;
11890 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11892 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11894 address
= fragP
->fr_address
;
11895 address
+= fragP
->fr_fix
;
11897 /* CMP like instrunction size. */
11898 size
= fragP
->tc_frag_data
.cmp_size
;
11900 /* The base size of the branch frag. */
11901 size
+= branch_fragP
->fr_fix
;
11903 /* Add opcode and displacement bytes for the rs_machine_dependent
11905 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11906 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11908 /* Check if branch is within boundary and doesn't end at the last
11910 offset
= address
& ((1U << align_branch_power
) - 1);
11911 if ((offset
+ size
) >= (1U << align_branch_power
))
11912 /* Padding needed to avoid crossing boundary. */
11913 padding_size
= (1U << align_branch_power
) - offset
;
11915 /* No padding needed. */
11918 /* The return value may be saved in tc_frag_data.length which is
11920 if (!fits_in_unsigned_byte (padding_size
))
11923 return padding_size
;
11926 /* i386_generic_table_relax_frag()
11928 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11929 grow/shrink padding to align branch frags. Hand others to
11933 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11935 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11936 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11938 long padding_size
= i386_branch_padding_size (fragP
, 0);
11939 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11941 /* When the BRANCH_PREFIX frag is used, the computed address
11942 must match the actual address and there should be no padding. */
11943 if (fragP
->tc_frag_data
.padding_address
11944 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11948 /* Update the padding size. */
11950 fragP
->tc_frag_data
.length
= padding_size
;
11954 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11956 fragS
*padding_fragP
, *next_fragP
;
11957 long padding_size
, left_size
, last_size
;
11959 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11960 if (!padding_fragP
)
11961 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11962 return (fragP
->tc_frag_data
.length
11963 - fragP
->tc_frag_data
.last_length
);
11965 /* Compute the relative address of the padding frag in the very
11966 first time where the BRANCH_PREFIX frag sizes are zero. */
11967 if (!fragP
->tc_frag_data
.padding_address
)
11968 fragP
->tc_frag_data
.padding_address
11969 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11971 /* First update the last length from the previous interation. */
11972 left_size
= fragP
->tc_frag_data
.prefix_length
;
11973 for (next_fragP
= fragP
;
11974 next_fragP
!= padding_fragP
;
11975 next_fragP
= next_fragP
->fr_next
)
11976 if (next_fragP
->fr_type
== rs_machine_dependent
11977 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11982 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11986 if (max
> left_size
)
11991 next_fragP
->tc_frag_data
.last_length
= size
;
11995 next_fragP
->tc_frag_data
.last_length
= 0;
11998 /* Check the padding size for the padding frag. */
11999 padding_size
= i386_branch_padding_size
12000 (padding_fragP
, (fragP
->fr_address
12001 + fragP
->tc_frag_data
.padding_address
));
12003 last_size
= fragP
->tc_frag_data
.prefix_length
;
12004 /* Check if there is change from the last interation. */
12005 if (padding_size
== last_size
)
12007 /* Update the expected address of the padding frag. */
12008 padding_fragP
->tc_frag_data
.padding_address
12009 = (fragP
->fr_address
+ padding_size
12010 + fragP
->tc_frag_data
.padding_address
);
12014 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12016 /* No padding if there is no sufficient room. Clear the
12017 expected address of the padding frag. */
12018 padding_fragP
->tc_frag_data
.padding_address
= 0;
12022 /* Store the expected address of the padding frag. */
12023 padding_fragP
->tc_frag_data
.padding_address
12024 = (fragP
->fr_address
+ padding_size
12025 + fragP
->tc_frag_data
.padding_address
);
12027 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12029 /* Update the length for the current interation. */
12030 left_size
= padding_size
;
12031 for (next_fragP
= fragP
;
12032 next_fragP
!= padding_fragP
;
12033 next_fragP
= next_fragP
->fr_next
)
12034 if (next_fragP
->fr_type
== rs_machine_dependent
12035 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12040 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12044 if (max
> left_size
)
12049 next_fragP
->tc_frag_data
.length
= size
;
12053 next_fragP
->tc_frag_data
.length
= 0;
12056 return (fragP
->tc_frag_data
.length
12057 - fragP
->tc_frag_data
.last_length
);
12059 return relax_frag (segment
, fragP
, stretch
);
12062 /* md_estimate_size_before_relax()
12064 Called just before relax() for rs_machine_dependent frags. The x86
12065 assembler uses these frags to handle variable size jump
12068 Any symbol that is now undefined will not become defined.
12069 Return the correct fr_subtype in the frag.
12070 Return the initial "guess for variable size of frag" to caller.
12071 The guess is actually the growth beyond the fixed part. Whatever
12072 we do to grow the fixed or variable part contributes to our
12076 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12078 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12079 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12080 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12082 i386_classify_machine_dependent_frag (fragP
);
12083 return fragP
->tc_frag_data
.length
;
12086 /* We've already got fragP->fr_subtype right; all we have to do is
12087 check for un-relaxable symbols. On an ELF system, we can't relax
12088 an externally visible symbol, because it may be overridden by a
12090 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12091 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12093 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12096 #if defined (OBJ_COFF) && defined (TE_PE)
12097 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12098 && S_IS_WEAK (fragP
->fr_symbol
))
12102 /* Symbol is undefined in this segment, or we need to keep a
12103 reloc so that weak symbols can be overridden. */
12104 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12105 enum bfd_reloc_code_real reloc_type
;
12106 unsigned char *opcode
;
12109 if (fragP
->fr_var
!= NO_RELOC
)
12110 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12111 else if (size
== 2)
12112 reloc_type
= BFD_RELOC_16_PCREL
;
12113 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12114 else if (need_plt32_p (fragP
->fr_symbol
))
12115 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12118 reloc_type
= BFD_RELOC_32_PCREL
;
12120 old_fr_fix
= fragP
->fr_fix
;
12121 opcode
= (unsigned char *) fragP
->fr_opcode
;
12123 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12126 /* Make jmp (0xeb) a (d)word displacement jump. */
12128 fragP
->fr_fix
+= size
;
12129 fix_new (fragP
, old_fr_fix
, size
,
12131 fragP
->fr_offset
, 1,
12137 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12139 /* Negate the condition, and branch past an
12140 unconditional jump. */
12143 /* Insert an unconditional jump. */
12145 /* We added two extra opcode bytes, and have a two byte
12147 fragP
->fr_fix
+= 2 + 2;
12148 fix_new (fragP
, old_fr_fix
+ 2, 2,
12150 fragP
->fr_offset
, 1,
12154 /* Fall through. */
12157 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12161 fragP
->fr_fix
+= 1;
12162 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12164 fragP
->fr_offset
, 1,
12165 BFD_RELOC_8_PCREL
);
12166 fixP
->fx_signed
= 1;
12170 /* This changes the byte-displacement jump 0x7N
12171 to the (d)word-displacement jump 0x0f,0x8N. */
12172 opcode
[1] = opcode
[0] + 0x10;
12173 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12174 /* We've added an opcode byte. */
12175 fragP
->fr_fix
+= 1 + size
;
12176 fix_new (fragP
, old_fr_fix
+ 1, size
,
12178 fragP
->fr_offset
, 1,
12183 BAD_CASE (fragP
->fr_subtype
);
12187 return fragP
->fr_fix
- old_fr_fix
;
12190 /* Guess size depending on current relax state. Initially the relax
12191 state will correspond to a short jump and we return 1, because
12192 the variable part of the frag (the branch offset) is one byte
12193 long. However, we can relax a section more than once and in that
12194 case we must either set fr_subtype back to the unrelaxed state,
12195 or return the value for the appropriate branch. */
12196 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12199 /* Called after relax() is finished.
12201 In: Address of frag.
12202 fr_type == rs_machine_dependent.
12203 fr_subtype is what the address relaxed to.
12205 Out: Any fixSs and constants are set up.
12206 Caller will turn frag into a ".space 0". */
12209 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12212 unsigned char *opcode
;
12213 unsigned char *where_to_put_displacement
= NULL
;
12214 offsetT target_address
;
12215 offsetT opcode_address
;
12216 unsigned int extension
= 0;
12217 offsetT displacement_from_opcode_start
;
12219 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12220 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12221 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12223 /* Generate nop padding. */
12224 unsigned int size
= fragP
->tc_frag_data
.length
;
12227 if (size
> fragP
->tc_frag_data
.max_bytes
)
12233 const char *branch
= "branch";
12234 const char *prefix
= "";
12235 fragS
*padding_fragP
;
12236 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12239 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12240 switch (fragP
->tc_frag_data
.default_prefix
)
12245 case CS_PREFIX_OPCODE
:
12248 case DS_PREFIX_OPCODE
:
12251 case ES_PREFIX_OPCODE
:
12254 case FS_PREFIX_OPCODE
:
12257 case GS_PREFIX_OPCODE
:
12260 case SS_PREFIX_OPCODE
:
12265 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12266 "%s within %d-byte boundary\n");
12268 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12269 "align %s within %d-byte boundary\n");
12273 padding_fragP
= fragP
;
12274 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12275 "%s within %d-byte boundary\n");
12279 switch (padding_fragP
->tc_frag_data
.branch_type
)
12281 case align_branch_jcc
:
12284 case align_branch_fused
:
12285 branch
= "fused jcc";
12287 case align_branch_jmp
:
12290 case align_branch_call
:
12293 case align_branch_indirect
:
12294 branch
= "indiret branch";
12296 case align_branch_ret
:
12303 fprintf (stdout
, msg
,
12304 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12305 (long long) fragP
->fr_address
, branch
,
12306 1 << align_branch_power
);
12308 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12309 memset (fragP
->fr_opcode
,
12310 fragP
->tc_frag_data
.default_prefix
, size
);
12312 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12314 fragP
->fr_fix
+= size
;
12319 opcode
= (unsigned char *) fragP
->fr_opcode
;
12321 /* Address we want to reach in file space. */
12322 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12324 /* Address opcode resides at in file space. */
12325 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12327 /* Displacement from opcode start to fill into instruction. */
12328 displacement_from_opcode_start
= target_address
- opcode_address
;
12330 if ((fragP
->fr_subtype
& BIG
) == 0)
12332 /* Don't have to change opcode. */
12333 extension
= 1; /* 1 opcode + 1 displacement */
12334 where_to_put_displacement
= &opcode
[1];
12338 if (no_cond_jump_promotion
12339 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12340 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12341 _("long jump required"));
12343 switch (fragP
->fr_subtype
)
12345 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12346 extension
= 4; /* 1 opcode + 4 displacement */
12348 where_to_put_displacement
= &opcode
[1];
12351 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12352 extension
= 2; /* 1 opcode + 2 displacement */
12354 where_to_put_displacement
= &opcode
[1];
12357 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12358 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12359 extension
= 5; /* 2 opcode + 4 displacement */
12360 opcode
[1] = opcode
[0] + 0x10;
12361 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12362 where_to_put_displacement
= &opcode
[2];
12365 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12366 extension
= 3; /* 2 opcode + 2 displacement */
12367 opcode
[1] = opcode
[0] + 0x10;
12368 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12369 where_to_put_displacement
= &opcode
[2];
12372 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12377 where_to_put_displacement
= &opcode
[3];
12381 BAD_CASE (fragP
->fr_subtype
);
12386 /* If size if less then four we are sure that the operand fits,
12387 but if it's 4, then it could be that the displacement is larger
12389 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12391 && ((addressT
) (displacement_from_opcode_start
- extension
12392 + ((addressT
) 1 << 31))
12393 > (((addressT
) 2 << 31) - 1)))
12395 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12396 _("jump target out of range"));
12397 /* Make us emit 0. */
12398 displacement_from_opcode_start
= extension
;
12400 /* Now put displacement after opcode. */
12401 md_number_to_chars ((char *) where_to_put_displacement
,
12402 (valueT
) (displacement_from_opcode_start
- extension
),
12403 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12404 fragP
->fr_fix
+= extension
;
12407 /* Apply a fixup (fixP) to segment data, once it has been determined
12408 by our caller that we have all the info we need to fix it up.
12410 Parameter valP is the pointer to the value of the bits.
12412 On the 386, immediates, displacements, and data pointers are all in
12413 the same (little-endian) format, so we don't need to care about which
12414 we are handling. */
12417 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12419 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12420 valueT value
= *valP
;
12422 #if !defined (TE_Mach)
12423 if (fixP
->fx_pcrel
)
12425 switch (fixP
->fx_r_type
)
12431 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12434 case BFD_RELOC_X86_64_32S
:
12435 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12438 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12441 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12446 if (fixP
->fx_addsy
!= NULL
12447 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12448 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12449 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12450 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12451 && !use_rela_relocations
)
12453 /* This is a hack. There should be a better way to handle this.
12454 This covers for the fact that bfd_install_relocation will
12455 subtract the current location (for partial_inplace, PC relative
12456 relocations); see more below. */
12460 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12463 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12465 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12468 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12470 if ((sym_seg
== seg
12471 || (symbol_section_p (fixP
->fx_addsy
)
12472 && sym_seg
!= absolute_section
))
12473 && !generic_force_reloc (fixP
))
12475 /* Yes, we add the values in twice. This is because
12476 bfd_install_relocation subtracts them out again. I think
12477 bfd_install_relocation is broken, but I don't dare change
12479 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12483 #if defined (OBJ_COFF) && defined (TE_PE)
12484 /* For some reason, the PE format does not store a
12485 section address offset for a PC relative symbol. */
12486 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12487 || S_IS_WEAK (fixP
->fx_addsy
))
12488 value
+= md_pcrel_from (fixP
);
12491 #if defined (OBJ_COFF) && defined (TE_PE)
12492 if (fixP
->fx_addsy
!= NULL
12493 && S_IS_WEAK (fixP
->fx_addsy
)
12494 /* PR 16858: Do not modify weak function references. */
12495 && ! fixP
->fx_pcrel
)
12497 #if !defined (TE_PEP)
12498 /* For x86 PE weak function symbols are neither PC-relative
12499 nor do they set S_IS_FUNCTION. So the only reliable way
12500 to detect them is to check the flags of their containing
12502 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12503 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12507 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12511 /* Fix a few things - the dynamic linker expects certain values here,
12512 and we must not disappoint it. */
12513 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12514 if (IS_ELF
&& fixP
->fx_addsy
)
12515 switch (fixP
->fx_r_type
)
12517 case BFD_RELOC_386_PLT32
:
12518 case BFD_RELOC_X86_64_PLT32
:
12519 /* Make the jump instruction point to the address of the operand.
12520 At runtime we merely add the offset to the actual PLT entry.
12521 NB: Subtract the offset size only for jump instructions. */
12522 if (fixP
->fx_pcrel
)
12526 case BFD_RELOC_386_TLS_GD
:
12527 case BFD_RELOC_386_TLS_LDM
:
12528 case BFD_RELOC_386_TLS_IE_32
:
12529 case BFD_RELOC_386_TLS_IE
:
12530 case BFD_RELOC_386_TLS_GOTIE
:
12531 case BFD_RELOC_386_TLS_GOTDESC
:
12532 case BFD_RELOC_X86_64_TLSGD
:
12533 case BFD_RELOC_X86_64_TLSLD
:
12534 case BFD_RELOC_X86_64_GOTTPOFF
:
12535 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12536 value
= 0; /* Fully resolved at runtime. No addend. */
12538 case BFD_RELOC_386_TLS_LE
:
12539 case BFD_RELOC_386_TLS_LDO_32
:
12540 case BFD_RELOC_386_TLS_LE_32
:
12541 case BFD_RELOC_X86_64_DTPOFF32
:
12542 case BFD_RELOC_X86_64_DTPOFF64
:
12543 case BFD_RELOC_X86_64_TPOFF32
:
12544 case BFD_RELOC_X86_64_TPOFF64
:
12545 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12548 case BFD_RELOC_386_TLS_DESC_CALL
:
12549 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12550 value
= 0; /* Fully resolved at runtime. No addend. */
12551 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12555 case BFD_RELOC_VTABLE_INHERIT
:
12556 case BFD_RELOC_VTABLE_ENTRY
:
12563 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12565 #endif /* !defined (TE_Mach) */
12567 /* Are we finished with this relocation now? */
12568 if (fixP
->fx_addsy
== NULL
)
12570 #if defined (OBJ_COFF) && defined (TE_PE)
12571 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12574 /* Remember value for tc_gen_reloc. */
12575 fixP
->fx_addnumber
= value
;
12576 /* Clear out the frag for now. */
12580 else if (use_rela_relocations
)
12582 fixP
->fx_no_overflow
= 1;
12583 /* Remember value for tc_gen_reloc. */
12584 fixP
->fx_addnumber
= value
;
12588 md_number_to_chars (p
, value
, fixP
->fx_size
);
12592 md_atof (int type
, char *litP
, int *sizeP
)
12594 /* This outputs the LITTLENUMs in REVERSE order;
12595 in accord with the bigendian 386. */
12596 return ieee_md_atof (type
, litP
, sizeP
, false);
12599 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
12602 output_invalid (int c
)
12605 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12608 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12609 "(0x%x)", (unsigned char) c
);
12610 return output_invalid_buf
;
12613 /* Verify that @r can be used in the current context. */
12615 static bool check_register (const reg_entry
*r
)
12617 if (allow_pseudo_reg
)
12620 if (operand_type_all_zero (&r
->reg_type
))
12623 if ((r
->reg_type
.bitfield
.dword
12624 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12625 || r
->reg_type
.bitfield
.class == RegCR
12626 || r
->reg_type
.bitfield
.class == RegDR
)
12627 && !cpu_arch_flags
.bitfield
.cpui386
)
12630 if (r
->reg_type
.bitfield
.class == RegTR
12631 && (flag_code
== CODE_64BIT
12632 || !cpu_arch_flags
.bitfield
.cpui386
12633 || cpu_arch_isa_flags
.bitfield
.cpui586
12634 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12637 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12640 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12642 if (r
->reg_type
.bitfield
.zmmword
12643 || r
->reg_type
.bitfield
.class == RegMask
)
12646 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12648 if (r
->reg_type
.bitfield
.ymmword
)
12651 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12656 if (r
->reg_type
.bitfield
.tmmword
12657 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12658 || flag_code
!= CODE_64BIT
))
12661 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12664 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12665 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12668 /* Upper 16 vector registers are only available with VREX in 64bit
12669 mode, and require EVEX encoding. */
12670 if (r
->reg_flags
& RegVRex
)
12672 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12673 || flag_code
!= CODE_64BIT
)
12676 if (i
.vec_encoding
== vex_encoding_default
)
12677 i
.vec_encoding
= vex_encoding_evex
;
12678 else if (i
.vec_encoding
!= vex_encoding_evex
)
12679 i
.vec_encoding
= vex_encoding_error
;
12682 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12683 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12684 && flag_code
!= CODE_64BIT
)
12687 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12694 /* REG_STRING starts *before* REGISTER_PREFIX. */
12696 static const reg_entry
*
12697 parse_real_register (char *reg_string
, char **end_op
)
12699 char *s
= reg_string
;
12701 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12702 const reg_entry
*r
;
12704 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12705 if (*s
== REGISTER_PREFIX
)
12708 if (is_space_char (*s
))
12711 p
= reg_name_given
;
12712 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12714 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12715 return (const reg_entry
*) NULL
;
12719 /* For naked regs, make sure that we are not dealing with an identifier.
12720 This prevents confusing an identifier like `eax_var' with register
12722 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12723 return (const reg_entry
*) NULL
;
12727 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12729 /* Handle floating point regs, allowing spaces in the (i) part. */
12732 if (!cpu_arch_flags
.bitfield
.cpu8087
12733 && !cpu_arch_flags
.bitfield
.cpu287
12734 && !cpu_arch_flags
.bitfield
.cpu387
12735 && !allow_pseudo_reg
)
12736 return (const reg_entry
*) NULL
;
12738 if (is_space_char (*s
))
12743 if (is_space_char (*s
))
12745 if (*s
>= '0' && *s
<= '7')
12747 int fpr
= *s
- '0';
12749 if (is_space_char (*s
))
12754 know (r
[fpr
].reg_num
== fpr
);
12758 /* We have "%st(" then garbage. */
12759 return (const reg_entry
*) NULL
;
12763 return r
&& check_register (r
) ? r
: NULL
;
12766 /* REG_STRING starts *before* REGISTER_PREFIX. */
12768 static const reg_entry
*
12769 parse_register (char *reg_string
, char **end_op
)
12771 const reg_entry
*r
;
12773 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12774 r
= parse_real_register (reg_string
, end_op
);
12779 char *save
= input_line_pointer
;
12783 input_line_pointer
= reg_string
;
12784 c
= get_symbol_name (®_string
);
12785 symbolP
= symbol_find (reg_string
);
12786 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12788 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12790 know (e
->X_op
== O_register
);
12791 know (e
->X_add_number
>= 0
12792 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12793 r
= i386_regtab
+ e
->X_add_number
;
12794 if (!check_register (r
))
12796 as_bad (_("register '%s%s' cannot be used here"),
12797 register_prefix
, r
->reg_name
);
12800 *end_op
= input_line_pointer
;
12802 *input_line_pointer
= c
;
12803 input_line_pointer
= save
;
12809 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12811 const reg_entry
*r
;
12812 char *end
= input_line_pointer
;
12815 r
= parse_register (name
, &input_line_pointer
);
12816 if (r
&& end
<= input_line_pointer
)
12818 *nextcharP
= *input_line_pointer
;
12819 *input_line_pointer
= 0;
12822 e
->X_op
= O_register
;
12823 e
->X_add_number
= r
- i386_regtab
;
12826 e
->X_op
= O_illegal
;
12829 input_line_pointer
= end
;
12831 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12835 md_operand (expressionS
*e
)
12838 const reg_entry
*r
;
12840 switch (*input_line_pointer
)
12842 case REGISTER_PREFIX
:
12843 r
= parse_real_register (input_line_pointer
, &end
);
12846 e
->X_op
= O_register
;
12847 e
->X_add_number
= r
- i386_regtab
;
12848 input_line_pointer
= end
;
12853 gas_assert (intel_syntax
);
12854 end
= input_line_pointer
++;
12856 if (*input_line_pointer
== ']')
12858 ++input_line_pointer
;
12859 e
->X_op_symbol
= make_expr_symbol (e
);
12860 e
->X_add_symbol
= NULL
;
12861 e
->X_add_number
= 0;
12866 e
->X_op
= O_absent
;
12867 input_line_pointer
= end
;
12874 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12875 const char *md_shortopts
= "kVQ:sqnO::";
12877 const char *md_shortopts
= "qnO::";
12880 #define OPTION_32 (OPTION_MD_BASE + 0)
12881 #define OPTION_64 (OPTION_MD_BASE + 1)
12882 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12883 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12884 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12885 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12886 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12887 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12888 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12889 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12890 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12891 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12892 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12893 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12894 #define OPTION_X32 (OPTION_MD_BASE + 14)
12895 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12896 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12897 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12898 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12899 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12900 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12901 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12902 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12903 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12904 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12905 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12906 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12907 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12908 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12909 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12910 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12911 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12912 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12913 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12915 struct option md_longopts
[] =
12917 {"32", no_argument
, NULL
, OPTION_32
},
12918 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12919 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12920 {"64", no_argument
, NULL
, OPTION_64
},
12922 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12923 {"x32", no_argument
, NULL
, OPTION_X32
},
12924 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12925 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12927 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12928 {"march", required_argument
, NULL
, OPTION_MARCH
},
12929 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12930 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12931 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12932 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12933 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12934 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12935 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12936 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12937 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12938 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12939 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12940 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12941 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12942 # if defined (TE_PE) || defined (TE_PEP)
12943 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12945 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12946 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12947 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12948 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12949 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12950 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12951 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12952 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12953 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
12954 {"mlfence-before-indirect-branch", required_argument
, NULL
,
12955 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
12956 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
12957 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12958 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12959 {NULL
, no_argument
, NULL
, 0}
12961 size_t md_longopts_size
= sizeof (md_longopts
);
12964 md_parse_option (int c
, const char *arg
)
12967 char *arch
, *next
, *saved
, *type
;
12972 optimize_align_code
= 0;
12976 quiet_warnings
= 1;
12979 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12980 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12981 should be emitted or not. FIXME: Not implemented. */
12983 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
12987 /* -V: SVR4 argument to print version ID. */
12989 print_version_id ();
12992 /* -k: Ignore for FreeBSD compatibility. */
12997 /* -s: On i386 Solaris, this tells the native assembler to use
12998 .stab instead of .stab.excl. We always use .stab anyhow. */
13001 case OPTION_MSHARED
:
13005 case OPTION_X86_USED_NOTE
:
13006 if (strcasecmp (arg
, "yes") == 0)
13008 else if (strcasecmp (arg
, "no") == 0)
13011 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13016 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13017 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13020 const char **list
, **l
;
13022 list
= bfd_target_list ();
13023 for (l
= list
; *l
!= NULL
; l
++)
13024 if (startswith (*l
, "elf64-x86-64")
13025 || strcmp (*l
, "coff-x86-64") == 0
13026 || strcmp (*l
, "pe-x86-64") == 0
13027 || strcmp (*l
, "pei-x86-64") == 0
13028 || strcmp (*l
, "mach-o-x86-64") == 0)
13030 default_arch
= "x86_64";
13034 as_fatal (_("no compiled in support for x86_64"));
13040 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13044 const char **list
, **l
;
13046 list
= bfd_target_list ();
13047 for (l
= list
; *l
!= NULL
; l
++)
13048 if (startswith (*l
, "elf32-x86-64"))
13050 default_arch
= "x86_64:32";
13054 as_fatal (_("no compiled in support for 32bit x86_64"));
13058 as_fatal (_("32bit x86_64 is only supported for ELF"));
13063 default_arch
= "i386";
13066 case OPTION_DIVIDE
:
13067 #ifdef SVR4_COMMENT_CHARS
13072 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13074 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13078 i386_comment_chars
= n
;
13084 saved
= xstrdup (arg
);
13086 /* Allow -march=+nosse. */
13092 as_fatal (_("invalid -march= option: `%s'"), arg
);
13093 next
= strchr (arch
, '+');
13096 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13098 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
13101 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13104 cpu_arch_name
= cpu_arch
[j
].name
;
13105 cpu_sub_arch_name
= NULL
;
13106 cpu_arch_flags
= cpu_arch
[j
].flags
;
13107 cpu_arch_isa
= cpu_arch
[j
].type
;
13108 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13109 if (!cpu_arch_tune_set
)
13111 cpu_arch_tune
= cpu_arch_isa
;
13112 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13116 else if (*cpu_arch
[j
].name
== '.'
13117 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13119 /* ISA extension. */
13120 i386_cpu_flags flags
;
13122 flags
= cpu_flags_or (cpu_arch_flags
,
13123 cpu_arch
[j
].flags
);
13125 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13127 if (cpu_sub_arch_name
)
13129 char *name
= cpu_sub_arch_name
;
13130 cpu_sub_arch_name
= concat (name
,
13132 (const char *) NULL
);
13136 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13137 cpu_arch_flags
= flags
;
13138 cpu_arch_isa_flags
= flags
;
13142 = cpu_flags_or (cpu_arch_isa_flags
,
13143 cpu_arch
[j
].flags
);
13148 if (j
>= ARRAY_SIZE (cpu_arch
))
13150 /* Disable an ISA extension. */
13151 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13152 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13154 i386_cpu_flags flags
;
13156 flags
= cpu_flags_and_not (cpu_arch_flags
,
13157 cpu_noarch
[j
].flags
);
13158 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13160 if (cpu_sub_arch_name
)
13162 char *name
= cpu_sub_arch_name
;
13163 cpu_sub_arch_name
= concat (arch
,
13164 (const char *) NULL
);
13168 cpu_sub_arch_name
= xstrdup (arch
);
13169 cpu_arch_flags
= flags
;
13170 cpu_arch_isa_flags
= flags
;
13175 if (j
>= ARRAY_SIZE (cpu_noarch
))
13176 j
= ARRAY_SIZE (cpu_arch
);
13179 if (j
>= ARRAY_SIZE (cpu_arch
))
13180 as_fatal (_("invalid -march= option: `%s'"), arg
);
13184 while (next
!= NULL
);
13190 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13191 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13193 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13195 cpu_arch_tune_set
= 1;
13196 cpu_arch_tune
= cpu_arch
[j
].type
;
13197 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13201 if (j
>= ARRAY_SIZE (cpu_arch
))
13202 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13205 case OPTION_MMNEMONIC
:
13206 if (strcasecmp (arg
, "att") == 0)
13207 intel_mnemonic
= 0;
13208 else if (strcasecmp (arg
, "intel") == 0)
13209 intel_mnemonic
= 1;
13211 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13214 case OPTION_MSYNTAX
:
13215 if (strcasecmp (arg
, "att") == 0)
13217 else if (strcasecmp (arg
, "intel") == 0)
13220 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13223 case OPTION_MINDEX_REG
:
13224 allow_index_reg
= 1;
13227 case OPTION_MNAKED_REG
:
13228 allow_naked_reg
= 1;
13231 case OPTION_MSSE2AVX
:
13235 case OPTION_MSSE_CHECK
:
13236 if (strcasecmp (arg
, "error") == 0)
13237 sse_check
= check_error
;
13238 else if (strcasecmp (arg
, "warning") == 0)
13239 sse_check
= check_warning
;
13240 else if (strcasecmp (arg
, "none") == 0)
13241 sse_check
= check_none
;
13243 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13246 case OPTION_MOPERAND_CHECK
:
13247 if (strcasecmp (arg
, "error") == 0)
13248 operand_check
= check_error
;
13249 else if (strcasecmp (arg
, "warning") == 0)
13250 operand_check
= check_warning
;
13251 else if (strcasecmp (arg
, "none") == 0)
13252 operand_check
= check_none
;
13254 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13257 case OPTION_MAVXSCALAR
:
13258 if (strcasecmp (arg
, "128") == 0)
13259 avxscalar
= vex128
;
13260 else if (strcasecmp (arg
, "256") == 0)
13261 avxscalar
= vex256
;
13263 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13266 case OPTION_MVEXWIG
:
13267 if (strcmp (arg
, "0") == 0)
13269 else if (strcmp (arg
, "1") == 0)
13272 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13275 case OPTION_MADD_BND_PREFIX
:
13276 add_bnd_prefix
= 1;
13279 case OPTION_MEVEXLIG
:
13280 if (strcmp (arg
, "128") == 0)
13281 evexlig
= evexl128
;
13282 else if (strcmp (arg
, "256") == 0)
13283 evexlig
= evexl256
;
13284 else if (strcmp (arg
, "512") == 0)
13285 evexlig
= evexl512
;
13287 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13290 case OPTION_MEVEXRCIG
:
13291 if (strcmp (arg
, "rne") == 0)
13293 else if (strcmp (arg
, "rd") == 0)
13295 else if (strcmp (arg
, "ru") == 0)
13297 else if (strcmp (arg
, "rz") == 0)
13300 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13303 case OPTION_MEVEXWIG
:
13304 if (strcmp (arg
, "0") == 0)
13306 else if (strcmp (arg
, "1") == 0)
13309 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13312 # if defined (TE_PE) || defined (TE_PEP)
13313 case OPTION_MBIG_OBJ
:
13318 case OPTION_MOMIT_LOCK_PREFIX
:
13319 if (strcasecmp (arg
, "yes") == 0)
13320 omit_lock_prefix
= 1;
13321 else if (strcasecmp (arg
, "no") == 0)
13322 omit_lock_prefix
= 0;
13324 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13327 case OPTION_MFENCE_AS_LOCK_ADD
:
13328 if (strcasecmp (arg
, "yes") == 0)
13330 else if (strcasecmp (arg
, "no") == 0)
13333 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13336 case OPTION_MLFENCE_AFTER_LOAD
:
13337 if (strcasecmp (arg
, "yes") == 0)
13338 lfence_after_load
= 1;
13339 else if (strcasecmp (arg
, "no") == 0)
13340 lfence_after_load
= 0;
13342 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13345 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13346 if (strcasecmp (arg
, "all") == 0)
13348 lfence_before_indirect_branch
= lfence_branch_all
;
13349 if (lfence_before_ret
== lfence_before_ret_none
)
13350 lfence_before_ret
= lfence_before_ret_shl
;
13352 else if (strcasecmp (arg
, "memory") == 0)
13353 lfence_before_indirect_branch
= lfence_branch_memory
;
13354 else if (strcasecmp (arg
, "register") == 0)
13355 lfence_before_indirect_branch
= lfence_branch_register
;
13356 else if (strcasecmp (arg
, "none") == 0)
13357 lfence_before_indirect_branch
= lfence_branch_none
;
13359 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13363 case OPTION_MLFENCE_BEFORE_RET
:
13364 if (strcasecmp (arg
, "or") == 0)
13365 lfence_before_ret
= lfence_before_ret_or
;
13366 else if (strcasecmp (arg
, "not") == 0)
13367 lfence_before_ret
= lfence_before_ret_not
;
13368 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13369 lfence_before_ret
= lfence_before_ret_shl
;
13370 else if (strcasecmp (arg
, "none") == 0)
13371 lfence_before_ret
= lfence_before_ret_none
;
13373 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13377 case OPTION_MRELAX_RELOCATIONS
:
13378 if (strcasecmp (arg
, "yes") == 0)
13379 generate_relax_relocations
= 1;
13380 else if (strcasecmp (arg
, "no") == 0)
13381 generate_relax_relocations
= 0;
13383 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13386 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13389 long int align
= strtoul (arg
, &end
, 0);
13394 align_branch_power
= 0;
13397 else if (align
>= 16)
13400 for (align_power
= 0;
13402 align
>>= 1, align_power
++)
13404 /* Limit alignment power to 31. */
13405 if (align
== 1 && align_power
< 32)
13407 align_branch_power
= align_power
;
13412 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13416 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13419 int align
= strtoul (arg
, &end
, 0);
13420 /* Some processors only support 5 prefixes. */
13421 if (*end
== '\0' && align
>= 0 && align
< 6)
13423 align_branch_prefix_size
= align
;
13426 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13431 case OPTION_MALIGN_BRANCH
:
13433 saved
= xstrdup (arg
);
13437 next
= strchr (type
, '+');
13440 if (strcasecmp (type
, "jcc") == 0)
13441 align_branch
|= align_branch_jcc_bit
;
13442 else if (strcasecmp (type
, "fused") == 0)
13443 align_branch
|= align_branch_fused_bit
;
13444 else if (strcasecmp (type
, "jmp") == 0)
13445 align_branch
|= align_branch_jmp_bit
;
13446 else if (strcasecmp (type
, "call") == 0)
13447 align_branch
|= align_branch_call_bit
;
13448 else if (strcasecmp (type
, "ret") == 0)
13449 align_branch
|= align_branch_ret_bit
;
13450 else if (strcasecmp (type
, "indirect") == 0)
13451 align_branch
|= align_branch_indirect_bit
;
13453 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13456 while (next
!= NULL
);
13460 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13461 align_branch_power
= 5;
13462 align_branch_prefix_size
= 5;
13463 align_branch
= (align_branch_jcc_bit
13464 | align_branch_fused_bit
13465 | align_branch_jmp_bit
);
13468 case OPTION_MAMD64
:
13472 case OPTION_MINTEL64
:
13480 /* Turn off -Os. */
13481 optimize_for_space
= 0;
13483 else if (*arg
== 's')
13485 optimize_for_space
= 1;
13486 /* Turn on all encoding optimizations. */
13487 optimize
= INT_MAX
;
13491 optimize
= atoi (arg
);
13492 /* Turn off -Os. */
13493 optimize_for_space
= 0;
/* Fixed-width line template used when listing CPU/extension names in
   --help output.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN chars) to the usage line being built at P inside
   MESSAGE; START is where names begin on each line and *LEFT_P tracks
   remaining room.  Flushes the line to STREAM when it would overflow.
   Returns the new write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
13545 show_arch (FILE *stream
, int ext
, int check
)
13547 static char message
[] = MESSAGE_TEMPLATE
;
13548 char *start
= message
+ 27;
13550 int size
= sizeof (MESSAGE_TEMPLATE
);
13557 left
= size
- (start
- message
);
13558 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13560 /* Should it be skipped? */
13561 if (cpu_arch
[j
].skip
)
13564 name
= cpu_arch
[j
].name
;
13565 len
= cpu_arch
[j
].len
;
13568 /* It is an extension. Skip if we aren't asked to show it. */
13579 /* It is an processor. Skip if we show only extension. */
13582 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13584 /* It is an impossible processor - skip. */
13588 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13591 /* Display disabled extensions. */
13593 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13595 name
= cpu_noarch
[j
].name
;
13596 len
= cpu_noarch
[j
].len
;
13597 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13602 fprintf (stream
, "%s\n", message
);
13606 md_show_usage (FILE *stream
)
13608 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13609 fprintf (stream
, _("\
13610 -Qy, -Qn ignored\n\
13611 -V print assembler version number\n\
13614 fprintf (stream
, _("\
13615 -n Do not optimize code alignment\n\
13616 -q quieten some warnings\n"));
13617 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13618 fprintf (stream
, _("\
13621 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13622 || defined (TE_PE) || defined (TE_PEP))
13623 fprintf (stream
, _("\
13624 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13626 #ifdef SVR4_COMMENT_CHARS
13627 fprintf (stream
, _("\
13628 --divide do not treat `/' as a comment character\n"));
13630 fprintf (stream
, _("\
13631 --divide ignored\n"));
13633 fprintf (stream
, _("\
13634 -march=CPU[,+EXTENSION...]\n\
13635 generate code for CPU and EXTENSION, CPU is one of:\n"));
13636 show_arch (stream
, 0, 1);
13637 fprintf (stream
, _("\
13638 EXTENSION is combination of:\n"));
13639 show_arch (stream
, 1, 0);
13640 fprintf (stream
, _("\
13641 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13642 show_arch (stream
, 0, 0);
13643 fprintf (stream
, _("\
13644 -msse2avx encode SSE instructions with VEX prefix\n"));
13645 fprintf (stream
, _("\
13646 -msse-check=[none|error|warning] (default: warning)\n\
13647 check SSE instructions\n"));
13648 fprintf (stream
, _("\
13649 -moperand-check=[none|error|warning] (default: warning)\n\
13650 check operand combinations for validity\n"));
13651 fprintf (stream
, _("\
13652 -mavxscalar=[128|256] (default: 128)\n\
13653 encode scalar AVX instructions with specific vector\n\
13655 fprintf (stream
, _("\
13656 -mvexwig=[0|1] (default: 0)\n\
13657 encode VEX instructions with specific VEX.W value\n\
13658 for VEX.W bit ignored instructions\n"));
13659 fprintf (stream
, _("\
13660 -mevexlig=[128|256|512] (default: 128)\n\
13661 encode scalar EVEX instructions with specific vector\n\
13663 fprintf (stream
, _("\
13664 -mevexwig=[0|1] (default: 0)\n\
13665 encode EVEX instructions with specific EVEX.W value\n\
13666 for EVEX.W bit ignored instructions\n"));
13667 fprintf (stream
, _("\
13668 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13669 encode EVEX instructions with specific EVEX.RC value\n\
13670 for SAE-only ignored instructions\n"));
13671 fprintf (stream
, _("\
13672 -mmnemonic=[att|intel] "));
13673 if (SYSV386_COMPAT
)
13674 fprintf (stream
, _("(default: att)\n"));
13676 fprintf (stream
, _("(default: intel)\n"));
13677 fprintf (stream
, _("\
13678 use AT&T/Intel mnemonic\n"));
13679 fprintf (stream
, _("\
13680 -msyntax=[att|intel] (default: att)\n\
13681 use AT&T/Intel syntax\n"));
13682 fprintf (stream
, _("\
13683 -mindex-reg support pseudo index registers\n"));
13684 fprintf (stream
, _("\
13685 -mnaked-reg don't require `%%' prefix for registers\n"));
13686 fprintf (stream
, _("\
13687 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13688 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13689 fprintf (stream
, _("\
13690 -mshared disable branch optimization for shared code\n"));
13691 fprintf (stream
, _("\
13692 -mx86-used-note=[no|yes] "));
13693 if (DEFAULT_X86_USED_NOTE
)
13694 fprintf (stream
, _("(default: yes)\n"));
13696 fprintf (stream
, _("(default: no)\n"));
13697 fprintf (stream
, _("\
13698 generate x86 used ISA and feature properties\n"));
13700 #if defined (TE_PE) || defined (TE_PEP)
13701 fprintf (stream
, _("\
13702 -mbig-obj generate big object files\n"));
13704 fprintf (stream
, _("\
13705 -momit-lock-prefix=[no|yes] (default: no)\n\
13706 strip all lock prefixes\n"));
13707 fprintf (stream
, _("\
13708 -mfence-as-lock-add=[no|yes] (default: no)\n\
13709 encode lfence, mfence and sfence as\n\
13710 lock addl $0x0, (%%{re}sp)\n"));
13711 fprintf (stream
, _("\
13712 -mrelax-relocations=[no|yes] "));
13713 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13714 fprintf (stream
, _("(default: yes)\n"));
13716 fprintf (stream
, _("(default: no)\n"));
13717 fprintf (stream
, _("\
13718 generate relax relocations\n"));
13719 fprintf (stream
, _("\
13720 -malign-branch-boundary=NUM (default: 0)\n\
13721 align branches within NUM byte boundary\n"));
13722 fprintf (stream
, _("\
13723 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13724 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13726 specify types of branches to align\n"));
13727 fprintf (stream
, _("\
13728 -malign-branch-prefix-size=NUM (default: 5)\n\
13729 align branches with NUM prefixes per instruction\n"));
13730 fprintf (stream
, _("\
13731 -mbranches-within-32B-boundaries\n\
13732 align branches within 32 byte boundary\n"));
13733 fprintf (stream
, _("\
13734 -mlfence-after-load=[no|yes] (default: no)\n\
13735 generate lfence after load\n"));
13736 fprintf (stream
, _("\
13737 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13738 generate lfence before indirect near branch\n"));
13739 fprintf (stream
, _("\
13740 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13741 generate lfence before ret\n"));
13742 fprintf (stream
, _("\
13743 -mamd64 accept only AMD64 ISA [default]\n"));
13744 fprintf (stream
, _("\
13745 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Sets global code-size/ABI state as a
   side effect and returns the BFD target name.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
#ifndef TE_SOLARIS
	    tls_get_addr = "___tls_get_addr";
#endif
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
13890 md_undefined_symbol (char *name
)
13892 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
13893 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
13894 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
13895 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
13899 if (symbol_find (name
))
13900 as_bad (_("GOT already in symbol table"));
13901 GOT_symbol
= symbol_new (name
, undefined_section
,
13902 &zero_address_frag
, 0);
13909 /* Round up a section size to the appropriate boundary. */
13912 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
13914 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13915 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
13917 /* For a.out, force the section size to be aligned. If we don't do
13918 this, BFD will align it for us, but it will not write out the
13919 final bytes of the section. This may be a bug in BFD, but it is
13920 easier to fix it here since that is how the other a.out targets
13924 align
= bfd_section_alignment (segment
);
13925 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
13932 /* On the i386, PC-relative offsets are relative to the start of the
13933 next instruction. That is, the address of the offset, plus its
13934 size, since the offset is always the last part of the insn. */
13937 md_pcrel_from (fixS
*fixP
)
13939 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
13945 s_bss (int ignore ATTRIBUTE_UNUSED
)
13949 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13951 obj_elf_section_change_hook ();
13953 temp
= get_absolute_expression ();
13954 subseg_set (bss_section
, (subsegT
) temp
);
13955 demand_empty_rest_of_line ();
13960 /* Remember constant directive. */
13963 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
13965 if (last_insn
.kind
!= last_insn_directive
13966 && (bfd_section_flags (now_seg
) & SEC_CODE
))
13968 last_insn
.seg
= now_seg
;
13969 last_insn
.kind
= last_insn_directive
;
13970 last_insn
.name
= "constant directive";
13971 last_insn
.file
= as_where (&last_insn
.line
);
13972 if (lfence_before_ret
!= lfence_before_ret_none
)
13974 if (lfence_before_indirect_branch
!= lfence_branch_none
)
13975 as_warn (_("constant directive skips -mlfence-before-ret "
13976 "and -mlfence-before-indirect-branch"));
13978 as_warn (_("constant directive skips -mlfence-before-ret"));
13980 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
13981 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
13986 i386_validate_fix (fixS
*fixp
)
13988 if (fixp
->fx_subsy
)
13990 if (fixp
->fx_subsy
== GOT_symbol
)
13992 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
13996 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13997 if (fixp
->fx_tcbit2
)
13998 fixp
->fx_r_type
= (fixp
->fx_tcbit
13999 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14000 : BFD_RELOC_X86_64_GOTPCRELX
);
14003 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14008 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14010 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14012 fixp
->fx_subsy
= 0;
14015 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14018 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14019 to section. Since PLT32 relocation must be against symbols,
14020 turn such PLT32 relocation into PC32 relocation. */
14022 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14023 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14024 && symbol_section_p (fixp
->fx_addsy
))
14025 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14028 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14029 && fixp
->fx_tcbit2
)
14030 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14037 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14040 bfd_reloc_code_real_type code
;
14042 switch (fixp
->fx_r_type
)
14044 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14045 case BFD_RELOC_SIZE32
:
14046 case BFD_RELOC_SIZE64
:
14047 if (S_IS_DEFINED (fixp
->fx_addsy
)
14048 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
14050 /* Resolve size relocation against local symbol to size of
14051 the symbol plus addend. */
14052 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
14053 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14054 && !fits_in_unsigned_long (value
))
14055 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14056 _("symbol size computation overflow"));
14057 fixp
->fx_addsy
= NULL
;
14058 fixp
->fx_subsy
= NULL
;
14059 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14063 /* Fall through. */
14065 case BFD_RELOC_X86_64_PLT32
:
14066 case BFD_RELOC_X86_64_GOT32
:
14067 case BFD_RELOC_X86_64_GOTPCREL
:
14068 case BFD_RELOC_X86_64_GOTPCRELX
:
14069 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14070 case BFD_RELOC_386_PLT32
:
14071 case BFD_RELOC_386_GOT32
:
14072 case BFD_RELOC_386_GOT32X
:
14073 case BFD_RELOC_386_GOTOFF
:
14074 case BFD_RELOC_386_GOTPC
:
14075 case BFD_RELOC_386_TLS_GD
:
14076 case BFD_RELOC_386_TLS_LDM
:
14077 case BFD_RELOC_386_TLS_LDO_32
:
14078 case BFD_RELOC_386_TLS_IE_32
:
14079 case BFD_RELOC_386_TLS_IE
:
14080 case BFD_RELOC_386_TLS_GOTIE
:
14081 case BFD_RELOC_386_TLS_LE_32
:
14082 case BFD_RELOC_386_TLS_LE
:
14083 case BFD_RELOC_386_TLS_GOTDESC
:
14084 case BFD_RELOC_386_TLS_DESC_CALL
:
14085 case BFD_RELOC_X86_64_TLSGD
:
14086 case BFD_RELOC_X86_64_TLSLD
:
14087 case BFD_RELOC_X86_64_DTPOFF32
:
14088 case BFD_RELOC_X86_64_DTPOFF64
:
14089 case BFD_RELOC_X86_64_GOTTPOFF
:
14090 case BFD_RELOC_X86_64_TPOFF32
:
14091 case BFD_RELOC_X86_64_TPOFF64
:
14092 case BFD_RELOC_X86_64_GOTOFF64
:
14093 case BFD_RELOC_X86_64_GOTPC32
:
14094 case BFD_RELOC_X86_64_GOT64
:
14095 case BFD_RELOC_X86_64_GOTPCREL64
:
14096 case BFD_RELOC_X86_64_GOTPC64
:
14097 case BFD_RELOC_X86_64_GOTPLT64
:
14098 case BFD_RELOC_X86_64_PLTOFF64
:
14099 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14100 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14101 case BFD_RELOC_RVA
:
14102 case BFD_RELOC_VTABLE_ENTRY
:
14103 case BFD_RELOC_VTABLE_INHERIT
:
14105 case BFD_RELOC_32_SECREL
:
14107 code
= fixp
->fx_r_type
;
14109 case BFD_RELOC_X86_64_32S
:
14110 if (!fixp
->fx_pcrel
)
14112 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14113 code
= fixp
->fx_r_type
;
14116 /* Fall through. */
14118 if (fixp
->fx_pcrel
)
14120 switch (fixp
->fx_size
)
14123 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14124 _("can not do %d byte pc-relative relocation"),
14126 code
= BFD_RELOC_32_PCREL
;
14128 case 1: code
= BFD_RELOC_8_PCREL
; break;
14129 case 2: code
= BFD_RELOC_16_PCREL
; break;
14130 case 4: code
= BFD_RELOC_32_PCREL
; break;
14132 case 8: code
= BFD_RELOC_64_PCREL
; break;
14138 switch (fixp
->fx_size
)
14141 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14142 _("can not do %d byte relocation"),
14144 code
= BFD_RELOC_32
;
14146 case 1: code
= BFD_RELOC_8
; break;
14147 case 2: code
= BFD_RELOC_16
; break;
14148 case 4: code
= BFD_RELOC_32
; break;
14150 case 8: code
= BFD_RELOC_64
; break;
14157 if ((code
== BFD_RELOC_32
14158 || code
== BFD_RELOC_32_PCREL
14159 || code
== BFD_RELOC_X86_64_32S
)
14161 && fixp
->fx_addsy
== GOT_symbol
)
14164 code
= BFD_RELOC_386_GOTPC
;
14166 code
= BFD_RELOC_X86_64_GOTPC32
;
14168 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14170 && fixp
->fx_addsy
== GOT_symbol
)
14172 code
= BFD_RELOC_X86_64_GOTPC64
;
14175 rel
= XNEW (arelent
);
14176 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14177 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14179 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14181 if (!use_rela_relocations
)
14183 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14184 vtable entry to be used in the relocation's section offset. */
14185 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14186 rel
->address
= fixp
->fx_offset
;
14187 #if defined (OBJ_COFF) && defined (TE_PE)
14188 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14189 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14194 /* Use the rela in 64bit mode. */
14197 if (disallow_64bit_reloc
)
14200 case BFD_RELOC_X86_64_DTPOFF64
:
14201 case BFD_RELOC_X86_64_TPOFF64
:
14202 case BFD_RELOC_64_PCREL
:
14203 case BFD_RELOC_X86_64_GOTOFF64
:
14204 case BFD_RELOC_X86_64_GOT64
:
14205 case BFD_RELOC_X86_64_GOTPCREL64
:
14206 case BFD_RELOC_X86_64_GOTPC64
:
14207 case BFD_RELOC_X86_64_GOTPLT64
:
14208 case BFD_RELOC_X86_64_PLTOFF64
:
14209 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14210 _("cannot represent relocation type %s in x32 mode"),
14211 bfd_get_reloc_code_name (code
));
14217 if (!fixp
->fx_pcrel
)
14218 rel
->addend
= fixp
->fx_offset
;
14222 case BFD_RELOC_X86_64_PLT32
:
14223 case BFD_RELOC_X86_64_GOT32
:
14224 case BFD_RELOC_X86_64_GOTPCREL
:
14225 case BFD_RELOC_X86_64_GOTPCRELX
:
14226 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14227 case BFD_RELOC_X86_64_TLSGD
:
14228 case BFD_RELOC_X86_64_TLSLD
:
14229 case BFD_RELOC_X86_64_GOTTPOFF
:
14230 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14231 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14232 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14235 rel
->addend
= (section
->vma
14237 + fixp
->fx_addnumber
14238 + md_pcrel_from (fixp
));
14243 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14244 if (rel
->howto
== NULL
)
14246 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14247 _("cannot represent relocation type %s"),
14248 bfd_get_reloc_code_name (code
));
14249 /* Set howto to a garbage value so that we can keep going. */
14250 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14251 gas_assert (rel
->howto
!= NULL
);
14257 #include "tc-i386-intel.c"
14260 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14262 int saved_naked_reg
;
14263 char saved_register_dot
;
14265 saved_naked_reg
= allow_naked_reg
;
14266 allow_naked_reg
= 1;
14267 saved_register_dot
= register_chars
['.'];
14268 register_chars
['.'] = '.';
14269 allow_pseudo_reg
= 1;
14270 expression_and_evaluate (exp
);
14271 allow_pseudo_reg
= 0;
14272 register_chars
['.'] = saved_register_dot
;
14273 allow_naked_reg
= saved_naked_reg
;
14275 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14277 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14279 exp
->X_op
= O_constant
;
14280 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14281 .dw2_regnum
[flag_code
>> 1];
14284 exp
->X_op
= O_illegal
;
14289 tc_x86_frame_initial_instructions (void)
14291 static unsigned int sp_regno
[2];
14293 if (!sp_regno
[flag_code
>> 1])
14295 char *saved_input
= input_line_pointer
;
14296 char sp
[][4] = {"esp", "rsp"};
14299 input_line_pointer
= sp
[flag_code
>> 1];
14300 tc_x86_parse_to_dw2regnum (&exp
);
14301 gas_assert (exp
.X_op
== O_constant
);
14302 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14303 input_line_pointer
= saved_input
;
14306 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14307 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14311 x86_dwarf2_addr_size (void)
14313 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14314 if (x86_elf_abi
== X86_64_X32_ABI
)
14317 return bfd_arch_bits_per_address (stdoutput
) / 8;
14321 i386_elf_section_type (const char *str
, size_t len
)
14323 if (flag_code
== CODE_64BIT
14324 && len
== sizeof ("unwind") - 1
14325 && strncmp (str
, "unwind", 6) == 0)
14326 return SHT_X86_64_UNWIND
;
14333 i386_solaris_fix_up_eh_frame (segT sec
)
14335 if (flag_code
== CODE_64BIT
)
14336 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14342 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14346 exp
.X_op
= O_secrel
;
14347 exp
.X_add_symbol
= symbol
;
14348 exp
.X_add_number
= 0;
14349 emit_expr (&exp
, size
);
14353 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14354 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14357 x86_64_section_letter (int letter
, const char **ptr_msg
)
14359 if (flag_code
== CODE_64BIT
)
14362 return SHF_X86_64_LARGE
;
14364 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14367 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14372 x86_64_section_word (char *str
, size_t len
)
14374 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14375 return SHF_X86_64_LARGE
;
14381 handle_large_common (int small ATTRIBUTE_UNUSED
)
14383 if (flag_code
!= CODE_64BIT
)
14385 s_comm_internal (0, elf_common_parse
);
14386 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14390 static segT lbss_section
;
14391 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14392 asection
*saved_bss_section
= bss_section
;
14394 if (lbss_section
== NULL
)
14396 flagword applicable
;
14397 segT seg
= now_seg
;
14398 subsegT subseg
= now_subseg
;
14400 /* The .lbss section is for local .largecomm symbols. */
14401 lbss_section
= subseg_new (".lbss", 0);
14402 applicable
= bfd_applicable_section_flags (stdoutput
);
14403 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14404 seg_info (lbss_section
)->bss
= 1;
14406 subseg_set (seg
, subseg
);
14409 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14410 bss_section
= lbss_section
;
14412 s_comm_internal (0, elf_common_parse
);
14414 elf_com_section_ptr
= saved_com_section_ptr
;
14415 bss_section
= saved_bss_section
;
14418 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */